Source-code library statistics page — products/Sources/formal languages/C/Linux/drivers/net/wireless/ath/ath12k/ (open-source operating system, version 6.17.9). File dated 2025-10-24, size 316 kB; image not shown.

Source file: wmi.c   Language: C

// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/mac80211.h>
#include <net/cfg80211.h>
#include <linux/completion.h>
#include <linux/if_ether.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/time.h>
#include <linux/of.h>
#include "core.h"
#include "debugfs.h"
#include "debug.h"
#include "mac.h"
#include "hw.h"
#include "peer.h"
#include "p2p.h"
#include "testmode.h"

/* Parse state for the WMI_SERVICE_READY_EVENT TLV walk: records whether
 * the service bitmap (first WMI_TAG_ARRAY_UINT32 TLV) has been consumed.
 */
struct ath12k_wmi_svc_ready_parse {
 bool wmi_svc_bitmap_done;
};

/* Parse state for the firmware stats TLV walk: the fixed stats event
 * header and the destination buffer the parsed stats are written into.
 */
struct wmi_tlv_fw_stats_parse {
 const struct wmi_stats_event *ev;
 struct ath12k_fw_stats *stats;
};

/* Accumulator for DMA ring capability TLVs: pointer to the first entry
 * and the number of entries seen so far.
 */
struct ath12k_wmi_dma_ring_caps_parse {
 struct ath12k_wmi_dma_ring_caps_params *dma_ring_caps;
 u32 n_dma_ring_caps;
};

/* Host-order copy of the fixed fields of WMI_SERVICE_READY_EXT_EVENT,
 * filled by ath12k_pull_svc_ready_ext().
 */
struct ath12k_wmi_service_ext_arg {
 u32 default_conc_scan_config_bits;
 u32 default_fw_config_bits;
 struct ath12k_wmi_ppe_threshold_arg ppet; /* HE PPE thresholds */
 u32 he_cap_info;
 u32 mpdu_density;
 u32 max_bssid_rx_filters;
 u32 num_hw_modes;
 u32 num_phy;
};

/* Parse state spanning the whole WMI_SERVICE_READY_EXT_EVENT TLV walk.
 * Pointers reference TLV payloads inside the event skb; the *_done flags
 * distinguish the successive WMI_TAG_ARRAY_STRUCT TLVs, which share a
 * tag and are only identifiable by arrival order.
 */
struct ath12k_wmi_svc_rdy_ext_parse {
 struct ath12k_wmi_service_ext_arg arg;
 const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps;
 const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
 u32 n_hw_mode_caps;
 u32 tot_phy_id;
 struct ath12k_wmi_hw_mode_cap_params pref_hw_mode_caps;
 struct ath12k_wmi_mac_phy_caps_params *mac_phy_caps;
 u32 n_mac_phy_caps;
 const struct ath12k_wmi_soc_hal_reg_caps_params *soc_hal_reg_caps;
 const struct ath12k_wmi_hal_reg_caps_ext_params *ext_hal_reg_caps;
 u32 n_ext_hal_reg_caps;
 struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
 bool hw_mode_done;
 bool mac_phy_done;
 bool ext_hal_reg_done;
 bool mac_phy_chainmask_combo_done;
 bool mac_phy_chainmask_cap_done;
 bool oem_dma_ring_cap_done;
 bool dma_ring_cap_done;
};

/* Host-order copy of the fixed fields of WMI_SERVICE_READY_EXT2_EVENT
 * (EHT/MLO-era capabilities).
 */
struct ath12k_wmi_svc_rdy_ext2_arg {
 u32 reg_db_version;
 u32 hw_min_max_tx_power_2ghz;
 u32 hw_min_max_tx_power_5ghz;
 u32 chwidth_num_peer_caps;
 u32 preamble_puncture_bw;
 u32 max_user_per_ppdu_ofdma;
 u32 max_user_per_ppdu_mumimo;
 u32 target_cap_flags;
 u32 eht_cap_mac_info[WMI_MAX_EHTCAP_MAC_SIZE];
 u32 max_num_linkview_peers;
 u32 max_num_msduq_supported_per_tid;
 u32 default_num_msduq_supported_per_tid;
};

/* Parse state for the WMI_SERVICE_READY_EXT2_EVENT TLV walk; the *_done
 * flags track which same-tag array TLVs have already been handled.
 */
struct ath12k_wmi_svc_rdy_ext2_parse {
 struct ath12k_wmi_svc_rdy_ext2_arg arg;
 struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
 bool dma_ring_cap_done;
 bool spectral_bin_scaling_done;
 bool mac_phy_caps_ext_done;
 bool hal_reg_caps_ext2_done;
 bool scan_radio_caps_ext2_done;
 bool twt_caps_done;
 bool htt_msdu_idx_to_qtype_map_done;
 bool dbs_or_sbs_cap_ext_done;
};

/* Parse state for WMI_READY_EVENT: count of extra MAC addresses seen. */
struct ath12k_wmi_rdy_parse {
 u32 num_extra_mac_addr;
};

/* Parse state for the DMA buffer release event: fixed params plus the
 * buffer-entry and metadata arrays (with counts and done flags to tell
 * the two same-tag array TLVs apart).
 */
struct ath12k_wmi_dma_buf_release_arg {
 struct ath12k_wmi_dma_buf_release_fixed_params fixed;
 const struct ath12k_wmi_dma_buf_release_entry_params *buf_entry;
 const struct ath12k_wmi_dma_buf_release_meta_data_params *meta_data;
 u32 num_buf_entry;
 u32 num_meta;
 bool buf_entry_done;
 bool meta_data_done;
};

/* Per-tag TLV policy: minimum acceptable payload length (0 = no check). */
struct ath12k_wmi_tlv_policy {
 size_t min_len;
};

/* Parse state for the mgmt rx event: fixed header plus the frame body
 * (frame_buf_done marks the first byte-array TLV as consumed).
 */
struct wmi_tlv_mgmt_rx_parse {
 const struct ath12k_wmi_mgmt_rx_params *fixed;
 const u8 *frame_buf;
 bool frame_buf_done;
};

/* Minimum payload length per TLV tag, enforced by ath12k_wmi_tlv_iter()
 * before the payload is handed to a parser. Tags absent from this table
 * (or with min_len 0) are not length-checked.
 */
static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
 [WMI_TAG_ARRAY_BYTE] = { .min_len = 0 },
 [WMI_TAG_ARRAY_UINT32] = { .min_len = 0 },
 [WMI_TAG_SERVICE_READY_EVENT] = {
  .min_len = sizeof(struct wmi_service_ready_event) },
 [WMI_TAG_SERVICE_READY_EXT_EVENT] = {
  .min_len = sizeof(struct wmi_service_ready_ext_event) },
 [WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS] = {
  .min_len = sizeof(struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params) },
 [WMI_TAG_SOC_HAL_REG_CAPABILITIES] = {
  .min_len = sizeof(struct ath12k_wmi_soc_hal_reg_caps_params) },
 [WMI_TAG_VDEV_START_RESPONSE_EVENT] = {
  .min_len = sizeof(struct wmi_vdev_start_resp_event) },
 [WMI_TAG_PEER_DELETE_RESP_EVENT] = {
  .min_len = sizeof(struct wmi_peer_delete_resp_event) },
 [WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT] = {
  .min_len = sizeof(struct wmi_bcn_tx_status_event) },
 [WMI_TAG_VDEV_STOPPED_EVENT] = {
  .min_len = sizeof(struct wmi_vdev_stopped_event) },
 [WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT] = {
  .min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) },
 [WMI_TAG_MGMT_RX_HDR] = {
  .min_len = sizeof(struct ath12k_wmi_mgmt_rx_params) },
 [WMI_TAG_MGMT_TX_COMPL_EVENT] = {
  .min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
 [WMI_TAG_SCAN_EVENT] = {
  .min_len = sizeof(struct wmi_scan_event) },
 [WMI_TAG_PEER_STA_KICKOUT_EVENT] = {
  .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
 [WMI_TAG_ROAM_EVENT] = {
  .min_len = sizeof(struct wmi_roam_event) },
 [WMI_TAG_CHAN_INFO_EVENT] = {
  .min_len = sizeof(struct wmi_chan_info_event) },
 [WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT] = {
  .min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
 [WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT] = {
  .min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
 [WMI_TAG_READY_EVENT] = {
  .min_len = sizeof(struct ath12k_wmi_ready_event_min_params) },
 [WMI_TAG_SERVICE_AVAILABLE_EVENT] = {
  .min_len = sizeof(struct wmi_service_available_event) },
 [WMI_TAG_PEER_ASSOC_CONF_EVENT] = {
  .min_len = sizeof(struct wmi_peer_assoc_conf_event) },
 [WMI_TAG_RFKILL_EVENT] = {
  .min_len = sizeof(struct wmi_rfkill_state_change_event) },
 [WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = {
  .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
 [WMI_TAG_HOST_SWFDA_EVENT] = {
  .min_len = sizeof(struct wmi_fils_discovery_event) },
 [WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
  .min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
 [WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
  .min_len = sizeof(struct wmi_vdev_delete_resp_event) },
 [WMI_TAG_TWT_ENABLE_COMPLETE_EVENT] = {
  .min_len = sizeof(struct wmi_twt_enable_event) },
 [WMI_TAG_TWT_DISABLE_COMPLETE_EVENT] = {
  .min_len = sizeof(struct wmi_twt_disable_event) },
 [WMI_TAG_P2P_NOA_INFO] = {
  .min_len = sizeof(struct ath12k_wmi_p2p_noa_info) },
 [WMI_TAG_P2P_NOA_EVENT] = {
  .min_len = sizeof(struct wmi_p2p_noa_event) },
 [WMI_TAG_11D_NEW_COUNTRY_EVENT] = {
  .min_len = sizeof(struct wmi_11d_new_cc_event) },
};

/* Build a TLV header word with the tag in the WMI_TLV_TAG field and the
 * payload length in the WMI_TLV_LEN field.
 */
__le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
{
 __le32 tag_field = le32_encode_bits(cmd, WMI_TLV_TAG);
 __le32 len_field = le32_encode_bits(len, WMI_TLV_LEN);

 return tag_field | len_field;
}

/* Like ath12k_wmi_tlv_hdr() but @len covers the whole TLV including its
 * header, so subtract TLV_HDR_SIZE to get the payload length.
 */
static __le32 ath12k_wmi_tlv_cmd_hdr(u32 cmd, u32 len)
{
 u32 payload_len = len - TLV_HDR_SIZE;

 return ath12k_wmi_tlv_hdr(cmd, payload_len);
}

/* ath12k_wmi_init_qcn9274 - fill the WMI resource config for QCN9274
 * @ab: ath12k base struct
 * @config: resource config to populate
 *
 * Peer/vdev/beacon limits scale with the number of radios; most other
 * limits come from the TARGET_* defaults.
 */
void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
        struct ath12k_wmi_resource_config_arg *config)
{
 config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS(ab);
 config->num_peers = ab->num_radios *
  ath12k_core_get_max_peers_per_radio(ab);
 config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
 config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
 config->num_peer_keys = TARGET_NUM_PEER_KEYS;
 config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
 /* All RF chains reported by the target are usable for both tx and rx */
 config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
 config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
 config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
 config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
 config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
 config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;

 if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
  config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
 else
  config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;

 config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
 config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
 config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
 config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
 config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
 config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
 config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
 config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
 config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
 config->dma_burst_size = TARGET_DMA_BURST_SIZE;
 config->rx_skip_defrag_timeout_dup_detection_check =
  TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
 config->vow_config = TARGET_VOW_CONFIG;
 config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
 config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
 config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
 config->rx_batchmode = TARGET_RX_BATCHMODE;
 /* Indicates host supports peer map v3 and unmap v2 support */
 config->peer_map_unmap_version = 0x32;
 config->twt_ap_pdev_count = ab->num_radios;
 config->twt_ap_sta_count = 1000;
 config->ema_max_vap_cnt = ab->num_radios;
 config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD;
 /* Reserve extra beacon-offload vdevs for EMA (multiple BSSID) use */
 config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt;

 if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map))
  config->peer_metadata_ver = ATH12K_PEER_METADATA_V1B;
}

void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
        struct ath12k_wmi_resource_config_arg *config)
{
 config->num_vdevs = 4;
 config->num_peers = 16;
 config->num_tids = 32;

 config->num_offload_peers = 3;
 config->num_offload_reorder_buffs = 3;
 config->num_peer_keys = TARGET_NUM_PEER_KEYS;
 config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
 config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
 config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
 config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
 config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
 config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
 config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
 config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
 config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
 config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
 config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
 config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
 config->num_mcast_groups = 0;
 config->num_mcast_table_elems = 0;
 config->mcast2ucast_mode = 0;
 config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
 config->num_wds_entries = 0;
 config->dma_burst_size = 0;
 config->rx_skip_defrag_timeout_dup_detection_check = 0;
 config->vow_config = TARGET_VOW_CONFIG;
 config->gtk_offload_max_vdev = 2;
 config->num_msdu_desc = 0x400;
 config->beacon_tx_offload_max_vdev = 2;
 config->rx_batchmode = TARGET_RX_BATCHMODE;

 config->peer_map_unmap_version = 0x1;
 config->use_pdev_id = 1;
 config->max_frag_entries = 0xa;
 config->num_tdls_vdevs = 0x1;
 config->num_tdls_conn_table_entries = 8;
 config->beacon_tx_offload_max_vdev = 0x2;
 config->num_multicast_filter_entries = 0x20;
 config->num_wow_filters = 0x16;
 config->num_keep_alive_pattern = 0;
}

/* PRIMAP(mode) expands to a designated initializer mapping the
 * WMI_HOST_HW_MODE_* value to its *_PRI priority constant via token
 * pasting.
 */
#define PRIMAP(_hw_mode_) \
 [_hw_mode_] = _hw_mode_##_PRI

/* Priority of each hw mode, indexed by WMI_HOST_HW_MODE_* value */
static const int ath12k_hw_mode_pri_map[] = {
 PRIMAP(WMI_HOST_HW_MODE_SINGLE),
 PRIMAP(WMI_HOST_HW_MODE_DBS),
 PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
 PRIMAP(WMI_HOST_HW_MODE_SBS),
 PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
 PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
 /* keep last */
 PRIMAP(WMI_HOST_HW_MODE_MAX),
};

/* ath12k_wmi_tlv_iter - walk a buffer of WMI TLVs
 * @ab: ath12k base struct (for error logging)
 * @ptr: start of the TLV stream
 * @len: total stream length in bytes
 * @iter: callback invoked with each TLV's tag, payload length and payload
 * @data: opaque cookie passed through to @iter
 *
 * Each TLV header and payload length is validated against the remaining
 * buffer and the per-tag minimum length in ath12k_wmi_tlv_policies[]
 * before @iter is called.
 *
 * Return: 0 on success, -EINVAL on a malformed stream, or the first
 * non-zero value returned by @iter.
 */
static int
ath12k_wmi_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
      int (*iter)(struct ath12k_base *ab, u16 tag, u16 len,
    const void *ptr, void *data),
      void *data)
{
 const void *begin = ptr;
 const struct wmi_tlv *tlv;
 u16 tlv_tag, tlv_len;
 int ret;

 while (len > 0) {
  /* Must have room for a full TLV header */
  if (len < sizeof(*tlv)) {
   ath12k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
       ptr - begin, len, sizeof(*tlv));
   return -EINVAL;
  }

  tlv = ptr;
  tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
  tlv_len = le32_get_bits(tlv->header, WMI_TLV_LEN);
  ptr += sizeof(*tlv);
  len -= sizeof(*tlv);

  /* Payload must fit within the remaining buffer */
  if (tlv_len > len) {
   ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
       tlv_tag, ptr - begin, len, tlv_len);
   return -EINVAL;
  }

  /* Enforce the per-tag minimum payload length, if one is set */
  if (tlv_tag < ARRAY_SIZE(ath12k_wmi_tlv_policies) &&
      ath12k_wmi_tlv_policies[tlv_tag].min_len &&
      ath12k_wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
   ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
       tlv_tag, ptr - begin, tlv_len,
       ath12k_wmi_tlv_policies[tlv_tag].min_len);
   return -EINVAL;
  }

  ret = iter(ab, tlv_tag, tlv_len, ptr, data);
  if (ret)
   return ret;

  ptr += tlv_len;
  len -= tlv_len;
 }

 return 0;
}

/* TLV iterator callback: records each in-range tag's payload pointer in
 * the table passed via @data (last occurrence of a tag wins).
 */
static int ath12k_wmi_tlv_iter_parse(struct ath12k_base *ab, u16 tag, u16 len,
         const void *ptr, void *data)
{
 const void **tb = data;

 if (tag >= WMI_TAG_MAX)
  return 0;

 tb[tag] = ptr;

 return 0;
}

/* Parse a TLV stream into @tb, a table of WMI_TAG_MAX payload pointers
 * indexed by tag.
 */
static int ath12k_wmi_tlv_parse(struct ath12k_base *ar, const void **tb,
    const void *ptr, size_t len)
{
 void *cookie = (void *)tb;

 return ath12k_wmi_tlv_iter(ar, ptr, len, ath12k_wmi_tlv_iter_parse,
       cookie);
}

/* Allocate a tag-indexed pointer table and fill it from the TLVs in
 * @skb. Returns the table (caller frees with kfree()) or an ERR_PTR on
 * allocation or parse failure.
 */
static const void **
ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab,
      struct sk_buff *skb, gfp_t gfp)
{
 const void **tb;
 int ret;

 tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
 if (!tb)
  return ERR_PTR(-ENOMEM);

 ret = ath12k_wmi_tlv_parse(ab, tb, skb->data, skb->len);
 if (!ret)
  return tb;

 kfree(tb);
 return ERR_PTR(ret);
}

/* Prepend a WMI command header to @skb and hand it to HTC without
 * waiting for tx credits. On HTC failure the header push is undone so
 * the caller may retry or free the skb.
 *
 * Return: 0 on success, -ENOMEM if there is no headroom, or the
 * ath12k_htc_send() error.
 */
static int ath12k_wmi_cmd_send_nowait(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
          u32 cmd_id)
{
 struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
 struct ath12k_base *ab = wmi->wmi_ab->ab;
 struct wmi_cmd_hdr *cmd_hdr;
 int ret;

 if (!skb_push(skb, sizeof(struct wmi_cmd_hdr)))
  return -ENOMEM;

 cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
 cmd_hdr->cmd_id = le32_encode_bits(cmd_id, WMI_CMD_HDR_CMD_ID);

 memset(skb_cb, 0, sizeof(*skb_cb));

 ret = ath12k_htc_send(&ab->htc, wmi->eid, skb);
 if (!ret)
  return 0;

 /* Undo the header push before reporting the failure */
 skb_pull(skb, sizeof(struct wmi_cmd_hdr));
 return ret;
}

/* ath12k_wmi_cmd_send - send a WMI command, waiting for HTC tx credits
 * @wmi: per-pdev WMI handle
 * @skb: command payload (header is pushed by the nowait helper)
 * @cmd_id: WMI command id
 *
 * Retries ath12k_wmi_cmd_send_nowait() each time the credit waitqueue is
 * woken, until it returns something other than -EAGAIN (out of credits),
 * the device has crashed (-ESHUTDOWN), or WMI_SEND_TIMEOUT_HZ elapses.
 * May sleep.
 *
 * Return: 0 on success or a negative errno (-EAGAIN on timeout).
 */
int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
   u32 cmd_id)
{
 struct ath12k_wmi_base *wmi_ab = wmi->wmi_ab;
 int ret = -EOPNOTSUPP;

 might_sleep();

 /* The statement-expression condition performs the actual send attempt
  * on every wakeup; the wait completes when it evaluates true.
  */
 wait_event_timeout(wmi_ab->tx_credits_wq, ({
  ret = ath12k_wmi_cmd_send_nowait(wmi, skb, cmd_id);

  if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH, &wmi_ab->ab->dev_flags))
   ret = -ESHUTDOWN;

  (ret != -EAGAIN);
 }), WMI_SEND_TIMEOUT_HZ);

 if (ret == -EAGAIN)
  ath12k_warn(wmi_ab->ab, "wmi command %d timeout\n", cmd_id);

 return ret;
}

static int ath12k_pull_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
         const void *ptr,
         struct ath12k_wmi_service_ext_arg *arg)
{
 const struct wmi_service_ready_ext_event *ev = ptr;
 int i;

 if (!ev)
  return -EINVAL;

 /* Move this to host based bitmap */
 arg->default_conc_scan_config_bits =
  le32_to_cpu(ev->default_conc_scan_config_bits);
 arg->default_fw_config_bits = le32_to_cpu(ev->default_fw_config_bits);
 arg->he_cap_info = le32_to_cpu(ev->he_cap_info);
 arg->mpdu_density = le32_to_cpu(ev->mpdu_density);
 arg->max_bssid_rx_filters = le32_to_cpu(ev->max_bssid_rx_filters);
 arg->ppet.numss_m1 = le32_to_cpu(ev->ppet.numss_m1);
 arg->ppet.ru_bit_mask = le32_to_cpu(ev->ppet.ru_info);

 for (i = 0; i < WMI_MAX_NUM_SS; i++)
  arg->ppet.ppet16_ppet8_ru3_ru0[i] =
   le32_to_cpu(ev->ppet.ppet16_ppet8_ru3_ru0[i]);

 return 0;
}

/* ath12k_pull_mac_phy_cap_svc_ready_ext - extract per-pdev MAC/PHY caps
 * @wmi_handle: per-pdev WMI handle
 * @svc: accumulated SERVICE_READY_EXT parse state
 * @hw_mode_id: hw mode to look up in the hw mode caps array
 * @phy_id: phy index within that hw mode
 * @pdev: pdev whose capability fields are filled in
 *
 * Locates the mac_phy caps entry for (@hw_mode_id, @phy_id), then fills
 * @pdev->cap (chain masks, per-band HT/VHT/HE capabilities for 2 GHz,
 * 5 GHz and 6 GHz) and records a new entry in ab->fw_pdev[].
 *
 * Return: 0 on success, -EINVAL if required TLVs are missing or the
 * hw mode / phy id is not found.
 */
static int
ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
          struct ath12k_wmi_svc_rdy_ext_parse *svc,
          u8 hw_mode_id, u8 phy_id,
          struct ath12k_pdev *pdev)
{
 const struct ath12k_wmi_mac_phy_caps_params *mac_caps;
 const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps = svc->hw_caps;
 const struct ath12k_wmi_hw_mode_cap_params *wmi_hw_mode_caps = svc->hw_mode_caps;
 const struct ath12k_wmi_mac_phy_caps_params *wmi_mac_phy_caps = svc->mac_phy_caps;
 struct ath12k_base *ab = wmi_handle->wmi_ab->ab;
 struct ath12k_band_cap *cap_band;
 struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
 struct ath12k_fw_pdev *fw_pdev;
 u32 phy_map;
 u32 hw_idx, phy_idx = 0;
 int i;

 if (!hw_caps || !wmi_hw_mode_caps || !svc->soc_hal_reg_caps)
  return -EINVAL;

 /* Find the matching hw mode; phy_idx is left at fls() of the previous
  * mode's phy_id_map (NOTE(review): assumes each mode's phy ids are a
  * contiguous run starting at bit 0 — confirm against firmware layout).
  */
 for (hw_idx = 0; hw_idx < le32_to_cpu(hw_caps->num_hw_modes); hw_idx++) {
  if (hw_mode_id == le32_to_cpu(wmi_hw_mode_caps[hw_idx].hw_mode_id))
   break;

  phy_map = le32_to_cpu(wmi_hw_mode_caps[hw_idx].phy_id_map);
  phy_idx = fls(phy_map);
 }

 if (hw_idx == le32_to_cpu(hw_caps->num_hw_modes))
  return -EINVAL;

 phy_idx += phy_id;
 if (phy_id >= le32_to_cpu(svc->soc_hal_reg_caps->num_phy))
  return -EINVAL;

 mac_caps = wmi_mac_phy_caps + phy_idx;

 pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
 pdev->hw_link_id = ath12k_wmi_mac_phy_get_hw_link_id(mac_caps);
 pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
 pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);

 /* Record this firmware pdev for later pdev-id/band lookups */
 fw_pdev = &ab->fw_pdev[ab->fw_pdev_count];
 fw_pdev->supported_bands = le32_to_cpu(mac_caps->supported_bands);
 fw_pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
 fw_pdev->phy_id = le32_to_cpu(mac_caps->phy_id);
 ab->fw_pdev_count++;

 /* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
 * band to band for a single radio, need to see how this should be
 * handled.
 */

 if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2GHZ_CAP) {
  pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_2g);
  pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_2g);
 } else if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5GHZ_CAP) {
  pdev_cap->vht_cap = le32_to_cpu(mac_caps->vht_cap_info_5g);
  pdev_cap->vht_mcs = le32_to_cpu(mac_caps->vht_supp_mcs_5g);
  pdev_cap->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
  pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_5g);
  pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_5g);
  pdev_cap->nss_ratio_enabled =
   WMI_NSS_RATIO_EN_DIS_GET(mac_caps->nss_ratio);
  pdev_cap->nss_ratio_info =
   WMI_NSS_RATIO_INFO_GET(mac_caps->nss_ratio);
 } else {
  return -EINVAL;
 }

 /* tx/rx chainmask reported from fw depends on the actual hw chains used,
 * For example, for 4x4 capable macphys, first 4 chains can be used for first
 * mac and the remaining 4 chains can be used for the second mac or vice-versa.
 * In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0
 * will be advertised for second mac or vice-versa. Compute the shift value
 * for tx/rx chainmask which will be used to advertise supported ht/vht rates to
 * mac80211.
 */

 pdev_cap->tx_chain_mask_shift =
   find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32);
 pdev_cap->rx_chain_mask_shift =
   find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);

 if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2GHZ_CAP) {
  cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
  cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
  cap_band->max_bw_supported = le32_to_cpu(mac_caps->max_bw_supported_2g);
  cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_2g);
  cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_2g);
  cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_2g_ext);
  cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_2g);
  for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
   cap_band->he_cap_phy_info[i] =
    le32_to_cpu(mac_caps->he_cap_phy_info_2g[i]);

  cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet2g.numss_m1);
  cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet2g.ru_info);

  for (i = 0; i < WMI_MAX_NUM_SS; i++)
   cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
    le32_to_cpu(mac_caps->he_ppet2g.ppet16_ppet8_ru3_ru0[i]);
 }

 if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5GHZ_CAP) {
  cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
  cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
  cap_band->max_bw_supported =
   le32_to_cpu(mac_caps->max_bw_supported_5g);
  cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
  cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
  cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
  cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
  for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
   cap_band->he_cap_phy_info[i] =
    le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);

  cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
  cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);

  for (i = 0; i < WMI_MAX_NUM_SS; i++)
   cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
    le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);

  /* 6 GHz reuses the firmware's 5 GHz capability fields */
  cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
  cap_band->max_bw_supported =
   le32_to_cpu(mac_caps->max_bw_supported_5g);
  cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
  cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
  cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
  cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
  for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
   cap_band->he_cap_phy_info[i] =
    le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);

  cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
  cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);

  for (i = 0; i < WMI_MAX_NUM_SS; i++)
   cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
    le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);
 }

 return 0;
}

/* Copy the per-phy HAL regulatory capability entry @phy_idx out of the
 * SERVICE_READY_EXT TLVs into host-order @param.
 *
 * Return: 0 on success, -EINVAL if a TLV is missing or @phy_idx is out
 * of range.
 */
static int
ath12k_pull_reg_cap_svc_rdy_ext(struct ath12k_wmi_pdev *wmi_handle,
    const struct ath12k_wmi_soc_hal_reg_caps_params *reg_caps,
    const struct ath12k_wmi_hal_reg_caps_ext_params *ext_caps,
    u8 phy_idx,
    struct ath12k_wmi_hal_reg_capabilities_ext_arg *param)
{
 const struct ath12k_wmi_hal_reg_caps_ext_params *cap;

 if (!reg_caps || !ext_caps)
  return -EINVAL;

 if (phy_idx >= le32_to_cpu(reg_caps->num_phy))
  return -EINVAL;

 cap = &ext_caps[phy_idx];

 param->phy_id = le32_to_cpu(cap->phy_id);
 param->eeprom_reg_domain = le32_to_cpu(cap->eeprom_reg_domain);
 param->eeprom_reg_domain_ext =
  le32_to_cpu(cap->eeprom_reg_domain_ext);
 param->regcap1 = le32_to_cpu(cap->regcap1);
 param->regcap2 = le32_to_cpu(cap->regcap2);
 /* check if param->wireless_mode is needed */
 param->low_2ghz_chan = le32_to_cpu(cap->low_2ghz_chan);
 param->high_2ghz_chan = le32_to_cpu(cap->high_2ghz_chan);
 param->low_5ghz_chan = le32_to_cpu(cap->low_5ghz_chan);
 param->high_5ghz_chan = le32_to_cpu(cap->high_5ghz_chan);

 return 0;
}

/* Copy the fixed fields of WMI_SERVICE_READY_EVENT from the TLV payload
 * at @evt_buf into host-order @cap.
 *
 * Return: 0 on success, -EINVAL if the TLV was absent.
 */
static int ath12k_pull_service_ready_tlv(struct ath12k_base *ab,
      const void *evt_buf,
      struct ath12k_wmi_target_cap_arg *cap)
{
 const struct wmi_service_ready_event *ev = evt_buf;

 if (!ev) {
  ath12k_err(ab, "%s: failed by NULL param\n",
      __func__);
  return -EINVAL;
 }

 cap->phy_capability = le32_to_cpu(ev->phy_capability);
 cap->max_frag_entry = le32_to_cpu(ev->max_frag_entry);
 cap->num_rf_chains = le32_to_cpu(ev->num_rf_chains);
 cap->ht_cap_info = le32_to_cpu(ev->ht_cap_info);
 cap->vht_cap_info = le32_to_cpu(ev->vht_cap_info);
 cap->vht_supp_mcs = le32_to_cpu(ev->vht_supp_mcs);
 cap->hw_min_tx_power = le32_to_cpu(ev->hw_min_tx_power);
 cap->hw_max_tx_power = le32_to_cpu(ev->hw_max_tx_power);
 cap->sys_cap_info = le32_to_cpu(ev->sys_cap_info);
 cap->min_pkt_size_enable = le32_to_cpu(ev->min_pkt_size_enable);
 cap->max_bcn_ie_size = le32_to_cpu(ev->max_bcn_ie_size);
 cap->max_num_scan_channels = le32_to_cpu(ev->max_num_scan_channels);
 cap->max_supported_macs = le32_to_cpu(ev->max_supported_macs);
 cap->wmi_fw_sub_feat_caps = le32_to_cpu(ev->wmi_fw_sub_feat_caps);
 cap->txrx_chainmask = le32_to_cpu(ev->txrx_chainmask);
 cap->default_dbs_hw_mode_index = le32_to_cpu(ev->default_dbs_hw_mode_index);
 cap->num_msdu_desc = le32_to_cpu(ev->num_msdu_desc);

 return 0;
}

/* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in
 * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each
 * 4-byte word.
 */

static void ath12k_wmi_service_bitmap_copy(struct ath12k_wmi_pdev *wmi,
        const u32 *wmi_svc_bm)
{
 /* i indexes 32-bit words of the firmware bitmap, j is the absolute
  * service bit number. The inner do-while always walks a complete
  * word, so j can run up to 31 bits past WMI_MAX_SERVICE within the
  * final word. NOTE(review): assumes svc_map is sized to tolerate
  * that overshoot — confirm against its declaration.
  */
 int i, j;

 for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
  do {
   if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
    set_bit(j, wmi->wmi_ab->svc_map);
  } while (++j % WMI_SERVICE_BITS_IN_SIZE32);
 }
}

/* TLV iterator callback for WMI_SERVICE_READY_EVENT: pulls the fixed
 * event into ab->target_caps and copies the first UINT32-array TLV (the
 * service bitmap) into the host service map.
 */
static int ath12k_wmi_svc_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
        const void *ptr, void *data)
{
 struct ath12k_wmi_svc_ready_parse *svc_ready = data;
 struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
 u16 expect_len;

 switch (tag) {
 case WMI_TAG_SERVICE_READY_EVENT:
  if (ath12k_pull_service_ready_tlv(ab, ptr, &ab->target_caps))
   return -EINVAL;
  break;

 case WMI_TAG_ARRAY_UINT32:
  /* Only the first UINT32 array carries the service bitmap */
  if (svc_ready->wmi_svc_bitmap_done)
   break;

  expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32);
  if (len < expect_len) {
   ath12k_warn(ab, "invalid len %d for the tag 0x%x\n",
        len, tag);
   return -EINVAL;
  }

  ath12k_wmi_service_bitmap_copy(wmi_handle, ptr);
  svc_ready->wmi_svc_bitmap_done = true;
  break;
 default:
  break;
 }

 return 0;
}

/* Handle WMI_SERVICE_READY_EVENT by walking its TLVs with
 * ath12k_wmi_svc_rdy_parse().
 */
static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
{
 struct ath12k_wmi_svc_ready_parse svc_ready = { };
 int ret;

 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
     ath12k_wmi_svc_rdy_parse, &svc_ready);
 if (!ret)
  return 0;

 ath12k_warn(ab, "failed to parse tlv %d\n", ret);
 return ret;
}

static u32 ath12k_wmi_mgmt_get_freq(struct ath12k *ar,
        struct ieee80211_tx_info *info)
{
 struct ath12k_base *ab = ar->ab;
 u32 freq = 0;

 if (ab->hw_params->single_pdev_only &&
     ar->scan.is_roc &&
     (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
  freq = ar->scan.roc_freq;

 return freq;
}

/* Allocate a zeroed WMI command skb of @len bytes (rounded up to a
 * 4-byte multiple) with WMI_SKB_HEADROOM reserved for the command
 * header. Returns NULL on allocation failure.
 */
struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_ab, u32 len)
{
 struct ath12k_base *ab = wmi_ab->ab;
 u32 round_len = roundup(len, 4);
 struct sk_buff *skb;

 skb = ath12k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len);
 if (!skb)
  return NULL;

 skb_reserve(skb, WMI_SKB_HEADROOM);
 if (!IS_ALIGNED((unsigned long)skb->data, 4))
  ath12k_warn(ab, "unaligned WMI skb data\n");

 memset(skb_put(skb, round_len), 0, round_len);

 return skb;
}

/* ath12k_wmi_mgmt_send - hand a management frame to firmware for tx
 * @arvif: link vif transmitting the frame
 * @buf_id: descriptor id matched against the tx-completion event
 * @frame: management frame; its DMA address is in ATH12K_SKB_CB(frame)
 *
 * Builds WMI_MGMT_TX_SEND_CMDID with up to WMI_MGMT_SEND_DOWNLD_LEN
 * bytes of the frame inlined as a byte-array TLV and the full frame
 * referenced by DMA address. For MLO station frames classified as
 * link agnostic, appends tx-params and MLO-params TLVs (hw_link_id set
 * to WMI_MGMT_LINK_AGNOSTIC_ID).
 *
 * Return: 0 on success, negative errno on failure (skb freed on error).
 */
int ath12k_wmi_mgmt_send(struct ath12k_link_vif *arvif, u32 buf_id,
    struct sk_buff *frame)
{
 struct ath12k *ar = arvif->ar;
 struct ath12k_wmi_pdev *wmi = ar->wmi;
 struct wmi_mgmt_send_cmd *cmd;
 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)frame->data;
 struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
 int cmd_len = sizeof(struct ath12k_wmi_mgmt_send_tx_params);
 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)hdr;
 struct ath12k_wmi_mlo_mgmt_send_params *ml_params;
 struct ath12k_base *ab = ar->ab;
 struct wmi_tlv *frame_tlv, *tlv;
 struct ath12k_skb_cb *skb_cb;
 u32 buf_len, buf_len_aligned;
 u32 vdev_id = arvif->vdev_id;
 bool link_agnostic = false;
 struct sk_buff *skb;
 int ret, len;
 void *ptr;

 /* Firmware downloads at most WMI_MGMT_SEND_DOWNLD_LEN bytes inline */
 buf_len = min_t(int, frame->len, WMI_MGMT_SEND_DOWNLD_LEN);

 buf_len_aligned = roundup(buf_len, sizeof(u32));

 len = sizeof(*cmd) + sizeof(*frame_tlv) + buf_len_aligned;

 if (ieee80211_vif_is_mld(vif)) {
  skb_cb = ATH12K_SKB_CB(frame);
  if ((skb_cb->flags & ATH12K_SKB_MLO_STA) &&
      ab->hw_params->hw_ops->is_frame_link_agnostic &&
      ab->hw_params->hw_ops->is_frame_link_agnostic(arvif, mgmt)) {
   /* Extra room for the tx-params and MLO-params TLVs */
   len += cmd_len + TLV_HDR_SIZE + sizeof(*ml_params);
   ath12k_generic_dbg(ATH12K_DBG_MGMT,
        "Sending Mgmt Frame fc 0x%0x as link agnostic",
        mgmt->frame_control);
   link_agnostic = true;
  }
 }

 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
 if (!skb)
  return -ENOMEM;

 cmd = (struct wmi_mgmt_send_cmd *)skb->data;
 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MGMT_TX_SEND_CMD,
       sizeof(*cmd));
 cmd->vdev_id = cpu_to_le32(vdev_id);
 cmd->desc_id = cpu_to_le32(buf_id);
 cmd->chanfreq = cpu_to_le32(ath12k_wmi_mgmt_get_freq(ar, info));
 cmd->paddr_lo = cpu_to_le32(lower_32_bits(ATH12K_SKB_CB(frame)->paddr));
 cmd->paddr_hi = cpu_to_le32(upper_32_bits(ATH12K_SKB_CB(frame)->paddr));
 cmd->frame_len = cpu_to_le32(frame->len);
 cmd->buf_len = cpu_to_le32(buf_len);
 cmd->tx_params_valid = 0;

 /* Inlined frame bytes follow the fixed command struct */
 frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
 frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len_aligned);

 memcpy(frame_tlv->value, frame->data, buf_len);

 if (!link_agnostic)
  goto send;

 /* Optional TLVs for link-agnostic MLO transmission */
 ptr = skb->data + sizeof(*cmd) + sizeof(*frame_tlv) + buf_len_aligned;

 tlv = ptr;

 /* Tx params not used currently */
 tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TX_SEND_PARAMS, cmd_len);
 ptr += cmd_len;

 tlv = ptr;
 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, sizeof(*ml_params));
 ptr += TLV_HDR_SIZE;

 ml_params = ptr;
 ml_params->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_TX_SEND_PARAMS,
             sizeof(*ml_params));

 ml_params->hw_link_id = cpu_to_le32(WMI_MGMT_LINK_AGNOSTIC_ID);

send:
 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
 if (ret) {
  ath12k_warn(ar->ab,
       "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
  dev_kfree_skb(skb);
 }

 return ret;
}

/* Request firmware statistics via WMI_REQUEST_STATS_CMDID for the given
 * stats bitmap, vdev and pdev.
 *
 * Return: 0 on success, negative errno on failure (skb freed on error).
 */
int ath12k_wmi_send_stats_request_cmd(struct ath12k *ar, u32 stats_id,
          u32 vdev_id, u32 pdev_id)
{
 struct ath12k_wmi_pdev *wmi = ar->wmi;
 struct wmi_request_stats_cmd *cmd;
 struct sk_buff *skb;
 int ret;

 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
 if (!skb)
  return -ENOMEM;

 cmd = (struct wmi_request_stats_cmd *)skb->data;
 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REQUEST_STATS_CMD,
       sizeof(*cmd));
 cmd->stats_id = cpu_to_le32(stats_id);
 cmd->vdev_id = cpu_to_le32(vdev_id);
 cmd->pdev_id = cpu_to_le32(pdev_id);

 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_STATS_CMDID);
 if (ret) {
  ath12k_warn(ar->ab, "failed to send WMI_REQUEST_STATS cmd\n");
  dev_kfree_skb(skb);
 }

 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
     "WMI request stats 0x%x vdev id %d pdev id %d\n",
     stats_id, vdev_id, pdev_id);

 return ret;
}

/* ath12k_wmi_vdev_create - create a firmware vdev
 * @ar: ath12k radio
 * @macaddr: interface MAC address for the new vdev
 * @args: creation parameters (id, type/subtype, pdev, chains, MLD addr)
 *
 * Builds WMI_VDEV_CREATE_CMDID containing the fixed command struct, a
 * tx/rx stream config TLV per supported band (2 GHz and 5 GHz), and —
 * when args->mld_addr is a valid ethernet address — an MLO vdev-create
 * params TLV carrying the MLD address.
 *
 * Return: 0 on success, negative errno on failure (skb freed on error).
 */
int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
      struct ath12k_wmi_vdev_create_arg *args)
{
 struct ath12k_wmi_pdev *wmi = ar->wmi;
 struct wmi_vdev_create_cmd *cmd;
 struct sk_buff *skb;
 struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams;
 bool is_ml_vdev = is_valid_ether_addr(args->mld_addr);
 struct wmi_vdev_create_mlo_params *ml_params;
 struct wmi_tlv *tlv;
 int ret, len;
 void *ptr;

 /* It can be optimized my sending tx/rx chain configuration
 * only for supported bands instead of always sending it for
 * both the bands.
 */

 len = sizeof(*cmd) + TLV_HDR_SIZE +
  (WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams)) +
  (is_ml_vdev ? TLV_HDR_SIZE + sizeof(*ml_params) : 0);

 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
 if (!skb)
  return -ENOMEM;

 cmd = (struct wmi_vdev_create_cmd *)skb->data;
 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CREATE_CMD,
       sizeof(*cmd));

 cmd->vdev_id = cpu_to_le32(args->if_id);
 cmd->vdev_type = cpu_to_le32(args->type);
 cmd->vdev_subtype = cpu_to_le32(args->subtype);
 cmd->num_cfg_txrx_streams = cpu_to_le32(WMI_NUM_SUPPORTED_BAND_MAX);
 cmd->pdev_id = cpu_to_le32(args->pdev_id);
 cmd->mbssid_flags = cpu_to_le32(args->mbssid_flags);
 cmd->mbssid_tx_vdev_id = cpu_to_le32(args->mbssid_tx_vdev_id);
 cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
 ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);

 if (args->if_stats_id != ATH12K_INVAL_VDEV_STATS_ID)
  cmd->vdev_stats_id_valid = cpu_to_le32(BIT(0));

 /* Array-of-structs TLV holding one stream config per band */
 ptr = skb->data + sizeof(*cmd);
 len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);

 tlv = ptr;
 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);

 ptr += TLV_HDR_SIZE;
 txrx_streams = ptr;
 len = sizeof(*txrx_streams);
 txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
         len);
 txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_2G);
 txrx_streams->supported_tx_streams =
    cpu_to_le32(args->chains[NL80211_BAND_2GHZ].tx);
 txrx_streams->supported_rx_streams =
    cpu_to_le32(args->chains[NL80211_BAND_2GHZ].rx);

 /* Second entry: 5 GHz stream configuration */
 txrx_streams++;
 txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
         len);
 txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_5G);
 txrx_streams->supported_tx_streams =
    cpu_to_le32(args->chains[NL80211_BAND_5GHZ].tx);
 txrx_streams->supported_rx_streams =
    cpu_to_le32(args->chains[NL80211_BAND_5GHZ].rx);

 ptr += WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);

 if (is_ml_vdev) {
  /* Optional MLO params TLV carrying the MLD MAC address */
  tlv = ptr;
  tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
       sizeof(*ml_params));
  ptr += TLV_HDR_SIZE;
  ml_params = ptr;

  ml_params->tlv_header =
   ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_CREATE_PARAMS,
            sizeof(*ml_params));
  ether_addr_copy(ml_params->mld_macaddr.addr, args->mld_addr);
 }

 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
     "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
     args->if_id, args->type, args->subtype,
     macaddr, args->pdev_id);

 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID);
 if (ret) {
  ath12k_warn(ar->ab,
       "failed to submit WMI_VDEV_CREATE_CMDID\n");
  dev_kfree_skb(skb);
 }

 return ret;
}

int ath12k_wmi_vdev_delete(struct ath12k *ar, u8 vdev_id)
{
 struct ath12k_wmi_pdev *wmi = ar->wmi;
 struct wmi_vdev_delete_cmd *cmd;
 struct sk_buff *skb;
 int ret;

 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
 if (!skb)
  return -ENOMEM;

 cmd = (struct wmi_vdev_delete_cmd *)skb->data;
 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DELETE_CMD,
       sizeof(*cmd));
 cmd->vdev_id = cpu_to_le32(vdev_id);

 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id);

 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID);
 if (ret) {
  ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n");
  dev_kfree_skb(skb);
 }

 return ret;
}

int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id)
{
 struct ath12k_wmi_pdev *wmi = ar->wmi;
 struct wmi_vdev_stop_cmd *cmd;
 struct sk_buff *skb;
 int ret;

 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
 if (!skb)
  return -ENOMEM;

 cmd = (struct wmi_vdev_stop_cmd *)skb->data;

 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_STOP_CMD,
       sizeof(*cmd));
 cmd->vdev_id = cpu_to_le32(vdev_id);

 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev stop id 0x%x\n", vdev_id);

 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID);
 if (ret) {
  ath12k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n");
  dev_kfree_skb(skb);
 }

 return ret;
}

int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id)
{
 struct ath12k_wmi_pdev *wmi = ar->wmi;
 struct wmi_vdev_down_cmd *cmd;
 struct sk_buff *skb;
 int ret;

 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
 if (!skb)
  return -ENOMEM;

 cmd = (struct wmi_vdev_down_cmd *)skb->data;

 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DOWN_CMD,
       sizeof(*cmd));
 cmd->vdev_id = cpu_to_le32(vdev_id);

 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev down id 0x%x\n", vdev_id);

 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID);
 if (ret) {
  ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n");
  dev_kfree_skb(skb);
 }

 return ret;
}

static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
           struct wmi_vdev_start_req_arg *arg)
{
 u32 center_freq1 = arg->band_center_freq1;

 memset(chan, 0, sizeof(*chan));

 chan->mhz = cpu_to_le32(arg->freq);
 chan->band_center_freq1 = cpu_to_le32(center_freq1);
 if (arg->mode == MODE_11BE_EHT320) {
  if (arg->freq > center_freq1)
   chan->band_center_freq1 = cpu_to_le32(center_freq1 + 80);
  else
   chan->band_center_freq1 = cpu_to_le32(center_freq1 - 80);

  chan->band_center_freq2 = cpu_to_le32(center_freq1);

 } else if (arg->mode == MODE_11BE_EHT160 ||
     arg->mode == MODE_11AX_HE160) {
  if (arg->freq > center_freq1)
   chan->band_center_freq1 = cpu_to_le32(center_freq1 + 40);
  else
   chan->band_center_freq1 = cpu_to_le32(center_freq1 - 40);

  chan->band_center_freq2 = cpu_to_le32(center_freq1);
 } else {
  chan->band_center_freq2 = 0;
 }

 chan->info |= le32_encode_bits(arg->mode, WMI_CHAN_INFO_MODE);
 if (arg->passive)
  chan->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
 if (arg->allow_ibss)
  chan->info |= cpu_to_le32(WMI_CHAN_INFO_ADHOC_ALLOWED);
 if (arg->allow_ht)
  chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
 if (arg->allow_vht)
  chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
 if (arg->allow_he)
  chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
 if (arg->ht40plus)
  chan->info |= cpu_to_le32(WMI_CHAN_INFO_HT40_PLUS);
 if (arg->chan_radar)
  chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
 if (arg->freq2_radar)
  chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS_FREQ2);

 chan->reg_info_1 = le32_encode_bits(arg->max_power,
         WMI_CHAN_REG_INFO1_MAX_PWR) |
  le32_encode_bits(arg->max_reg_power,
     WMI_CHAN_REG_INFO1_MAX_REG_PWR);

 chan->reg_info_2 = le32_encode_bits(arg->max_antenna_gain,
         WMI_CHAN_REG_INFO2_ANT_MAX) |
  le32_encode_bits(arg->max_power, WMI_CHAN_REG_INFO2_MAX_TX_PWR);
}

int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
     bool restart)
{
 struct wmi_vdev_start_mlo_params *ml_params;
 struct wmi_partner_link_info *partner_info;
 struct ath12k_wmi_pdev *wmi = ar->wmi;
 struct wmi_vdev_start_request_cmd *cmd;
 struct sk_buff *skb;
 struct ath12k_wmi_channel_params *chan;
 struct wmi_tlv *tlv;
 void *ptr;
 int ret, len, i, ml_arg_size = 0;

 if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
  return -EINVAL;

 len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;

 if (!restart && arg->ml.enabled) {
  ml_arg_size = TLV_HDR_SIZE + sizeof(*ml_params) +
         TLV_HDR_SIZE + (arg->ml.num_partner_links *
           sizeof(*partner_info));
  len += ml_arg_size;
 }
 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
 if (!skb)
  return -ENOMEM;

 cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_START_REQUEST_CMD,
       sizeof(*cmd));
 cmd->vdev_id = cpu_to_le32(arg->vdev_id);
 cmd->beacon_interval = cpu_to_le32(arg->bcn_intval);
 cmd->bcn_tx_rate = cpu_to_le32(arg->bcn_tx_rate);
 cmd->dtim_period = cpu_to_le32(arg->dtim_period);
 cmd->num_noa_descriptors = cpu_to_le32(arg->num_noa_descriptors);
 cmd->preferred_rx_streams = cpu_to_le32(arg->pref_rx_streams);
 cmd->preferred_tx_streams = cpu_to_le32(arg->pref_tx_streams);
 cmd->cac_duration_ms = cpu_to_le32(arg->cac_duration_ms);
 cmd->regdomain = cpu_to_le32(arg->regdomain);
 cmd->he_ops = cpu_to_le32(arg->he_ops);
 cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
 cmd->mbssid_flags = cpu_to_le32(arg->mbssid_flags);
 cmd->mbssid_tx_vdev_id = cpu_to_le32(arg->mbssid_tx_vdev_id);

 if (!restart) {
  if (arg->ssid) {
   cmd->ssid.ssid_len = cpu_to_le32(arg->ssid_len);
   memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
  }
  if (arg->hidden_ssid)
   cmd->flags |= cpu_to_le32(WMI_VDEV_START_HIDDEN_SSID);
  if (arg->pmf_enabled)
   cmd->flags |= cpu_to_le32(WMI_VDEV_START_PMF_ENABLED);
 }

 cmd->flags |= cpu_to_le32(WMI_VDEV_START_LDPC_RX_ENABLED);

 ptr = skb->data + sizeof(*cmd);
 chan = ptr;

 ath12k_wmi_put_wmi_channel(chan, arg);

 chan->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
        sizeof(*chan));
 ptr += sizeof(*chan);

 tlv = ptr;
 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);

 /* Note: This is a nested TLV containing:
 * [wmi_tlv][ath12k_wmi_p2p_noa_descriptor][wmi_tlv]..
 */


 ptr += sizeof(*tlv);

 if (ml_arg_size) {
  tlv = ptr;
  tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
       sizeof(*ml_params));
  ptr += TLV_HDR_SIZE;

  ml_params = ptr;

  ml_params->tlv_header =
   ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_START_PARAMS,
            sizeof(*ml_params));

  ml_params->flags = le32_encode_bits(arg->ml.enabled,
          ATH12K_WMI_FLAG_MLO_ENABLED) |
       le32_encode_bits(arg->ml.assoc_link,
          ATH12K_WMI_FLAG_MLO_ASSOC_LINK) |
       le32_encode_bits(arg->ml.mcast_link,
          ATH12K_WMI_FLAG_MLO_MCAST_VDEV) |
       le32_encode_bits(arg->ml.link_add,
          ATH12K_WMI_FLAG_MLO_LINK_ADD);

  ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %d start ml flags 0x%x\n",
      arg->vdev_id, ml_params->flags);

  ptr += sizeof(*ml_params);

  tlv = ptr;
  tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
       arg->ml.num_partner_links *
       sizeof(*partner_info));
  ptr += TLV_HDR_SIZE;

  partner_info = ptr;

  for (i = 0; i < arg->ml.num_partner_links; i++) {
   partner_info->tlv_header =
    ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PARTNER_LINK_PARAMS,
             sizeof(*partner_info));
   partner_info->vdev_id =
    cpu_to_le32(arg->ml.partner_info[i].vdev_id);
   partner_info->hw_link_id =
    cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
   ether_addr_copy(partner_info->vdev_addr.addr,
     arg->ml.partner_info[i].addr);

   ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "partner vdev %d hw_link_id %d macaddr%pM\n",
       partner_info->vdev_id, partner_info->hw_link_id,
       partner_info->vdev_addr.addr);

   partner_info++;
  }

  ptr = partner_info;
 }

 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
     restart ? "restart" : "start", arg->vdev_id,
     arg->freq, arg->mode);

 if (restart)
  ret = ath12k_wmi_cmd_send(wmi, skb,
       WMI_VDEV_RESTART_REQUEST_CMDID);
 else
  ret = ath12k_wmi_cmd_send(wmi, skb,
       WMI_VDEV_START_REQUEST_CMDID);
 if (ret) {
  ath12k_warn(ar->ab, "failed to submit vdev_%s cmd\n",
       restart ? "restart" : "start");
  dev_kfree_skb(skb);
 }

 return ret;
}

int ath12k_wmi_vdev_up(struct ath12k *ar, struct ath12k_wmi_vdev_up_params *params)
{
 struct ath12k_wmi_pdev *wmi = ar->wmi;
 struct wmi_vdev_up_cmd *cmd;
 struct sk_buff *skb;
 int ret;

 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
 if (!skb)
  return -ENOMEM;

 cmd = (struct wmi_vdev_up_cmd *)skb->data;

 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_UP_CMD,
       sizeof(*cmd));
 cmd->vdev_id = cpu_to_le32(params->vdev_id);
 cmd->vdev_assoc_id = cpu_to_le32(params->aid);

 ether_addr_copy(cmd->vdev_bssid.addr, params->bssid);

 if (params->tx_bssid) {
  ether_addr_copy(cmd->tx_vdev_bssid.addr, params->tx_bssid);
  cmd->nontx_profile_idx = cpu_to_le32(params->nontx_profile_idx);
  cmd->nontx_profile_cnt = cpu_to_le32(params->nontx_profile_cnt);
 }

 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
     "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
     params->vdev_id, params->aid, params->bssid);

 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
 if (ret) {
  ath12k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n");
  dev_kfree_skb(skb);
 }

 return ret;
}

int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar,
        struct ath12k_wmi_peer_create_arg *arg)
{
 struct ath12k_wmi_pdev *wmi = ar->wmi;
 struct wmi_peer_create_cmd *cmd;
 struct sk_buff *skb;
 int ret, len;
 struct wmi_peer_create_mlo_params *ml_param;
 void *ptr;
 struct wmi_tlv *tlv;

 len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*ml_param);

 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
 if (!skb)
  return -ENOMEM;

 cmd = (struct wmi_peer_create_cmd *)skb->data;
 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_CREATE_CMD,
       sizeof(*cmd));

 ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_addr);
 cmd->peer_type = cpu_to_le32(arg->peer_type);
 cmd->vdev_id = cpu_to_le32(arg->vdev_id);

 ptr = skb->data + sizeof(*cmd);
 tlv = ptr;
 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
      sizeof(*ml_param));
 ptr += TLV_HDR_SIZE;
 ml_param = ptr;
 ml_param->tlv_header =
   ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_CREATE_PARAMS,
            sizeof(*ml_param));
 if (arg->ml_enabled)
  ml_param->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);

 ptr += sizeof(*ml_param);

 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
     "WMI peer create vdev_id %d peer_addr %pM ml_flags 0x%x\n",
     arg->vdev_id, arg->peer_addr, ml_param->flags);

 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
 if (ret) {
  ath12k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n");
  dev_kfree_skb(skb);
 }

 return ret;
}

int ath12k_wmi_send_peer_delete_cmd(struct ath12k *ar,
        const u8 *peer_addr, u8 vdev_id)
{
 struct ath12k_wmi_pdev *wmi = ar->wmi;
 struct wmi_peer_delete_cmd *cmd;
 struct sk_buff *skb;
 int ret;

 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
 if (!skb)
  return -ENOMEM;

 cmd = (struct wmi_peer_delete_cmd *)skb->data;
 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_DELETE_CMD,
       sizeof(*cmd));

 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
 cmd->vdev_id = cpu_to_le32(vdev_id);

 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
     "WMI peer delete vdev_id %d peer_addr %pM\n",
     vdev_id,  peer_addr);

 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID);
 if (ret) {
  ath12k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n");
  dev_kfree_skb(skb);
 }

 return ret;
}

int ath12k_wmi_send_pdev_set_regdomain(struct ath12k *ar,
           struct ath12k_wmi_pdev_set_regdomain_arg *arg)
{
 struct ath12k_wmi_pdev *wmi = ar->wmi;
 struct wmi_pdev_set_regdomain_cmd *cmd;
 struct sk_buff *skb;
 int ret;

 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
 if (!skb)
  return -ENOMEM;

 cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_REGDOMAIN_CMD,
       sizeof(*cmd));

 cmd->reg_domain = cpu_to_le32(arg->current_rd_in_use);
 cmd->reg_domain_2g = cpu_to_le32(arg->current_rd_2g);
 cmd->reg_domain_5g = cpu_to_le32(arg->current_rd_5g);
 cmd->conformance_test_limit_2g = cpu_to_le32(arg->ctl_2g);
 cmd->conformance_test_limit_5g = cpu_to_le32(arg->ctl_5g);
 cmd->dfs_domain = cpu_to_le32(arg->dfs_domain);
 cmd->pdev_id = cpu_to_le32(arg->pdev_id);

 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
     "WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
     arg->current_rd_in_use, arg->current_rd_2g,
     arg->current_rd_5g, arg->dfs_domain, arg->pdev_id);

 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
 if (ret) {
  ath12k_warn(ar->ab,
       "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");
  dev_kfree_skb(skb);
 }

 return ret;
}

int ath12k_wmi_set_peer_param(struct ath12k *ar, const u8 *peer_addr,
         u32 vdev_id, u32 param_id, u32 param_val)
{
 struct ath12k_wmi_pdev *wmi = ar->wmi;
 struct wmi_peer_set_param_cmd *cmd;
 struct sk_buff *skb;
 int ret;

 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
 if (!skb)
  return -ENOMEM;

 cmd = (struct wmi_peer_set_param_cmd *)skb->data;
 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_SET_PARAM_CMD,
       sizeof(*cmd));
 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
 cmd->vdev_id = cpu_to_le32(vdev_id);
 cmd->param_id = cpu_to_le32(param_id);
 cmd->param_value = cpu_to_le32(param_val);

 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
     "WMI vdev %d peer 0x%pM set param %d value %d\n",
     vdev_id, peer_addr, param_id, param_val);

 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID);
 if (ret) {
  ath12k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n");
  dev_kfree_skb(skb);
 }

 return ret;
}

int ath12k_wmi_send_peer_flush_tids_cmd(struct ath12k *ar,
     u8 peer_addr[ETH_ALEN],
     u32 peer_tid_bitmap,
     u8 vdev_id)
{
 struct ath12k_wmi_pdev *wmi = ar->wmi;
 struct wmi_peer_flush_tids_cmd *cmd;
 struct sk_buff *skb;
 int ret;

 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
 if (!skb)
  return -ENOMEM;

 cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_FLUSH_TIDS_CMD,
       sizeof(*cmd));

 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
 cmd->peer_tid_bitmap = cpu_to_le32(peer_tid_bitmap);
 cmd->vdev_id = cpu_to_le32(vdev_id);

 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
     "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n",
     vdev_id, peer_addr, peer_tid_bitmap);

 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID);
 if (ret) {
  ath12k_warn(ar->ab,
       "failed to send WMI_PEER_FLUSH_TIDS cmd\n");
  dev_kfree_skb(skb);
 }

 return ret;
}

int ath12k_wmi_peer_rx_reorder_queue_setup(struct ath12k *ar,
        int vdev_id, const u8 *addr,
        dma_addr_t paddr, u8 tid,
        u8 ba_window_size_valid,
        u32 ba_window_size)
{
 struct wmi_peer_reorder_queue_setup_cmd *cmd;
 struct sk_buff *skb;
 int ret;

 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
 if (!skb)
  return -ENOMEM;

 cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data;
 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_SETUP_CMD,
       sizeof(*cmd));

 ether_addr_copy(cmd->peer_macaddr.addr, addr);
 cmd->vdev_id = cpu_to_le32(vdev_id);
 cmd->tid = cpu_to_le32(tid);
 cmd->queue_ptr_lo = cpu_to_le32(lower_32_bits(paddr));
 cmd->queue_ptr_hi = cpu_to_le32(upper_32_bits(paddr));
 cmd->queue_no = cpu_to_le32(tid);
 cmd->ba_window_size_valid = cpu_to_le32(ba_window_size_valid);
 cmd->ba_window_size = cpu_to_le32(ba_window_size);

 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
     "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n",
     addr, vdev_id, tid);

 ret = ath12k_wmi_cmd_send(ar->wmi, skb,
      WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
 if (ret) {
  ath12k_warn(ar->ab,
       "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");
  dev_kfree_skb(skb);
 }

 return ret;
}

int
ath12k_wmi_rx_reord_queue_remove(struct ath12k *ar,
     struct ath12k_wmi_rx_reorder_queue_remove_arg *arg)
{
 struct ath12k_wmi_pdev *wmi = ar->wmi;
 struct wmi_peer_reorder_queue_remove_cmd *cmd;
 struct sk_buff *skb;
 int ret;

 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
 if (!skb)
  return -ENOMEM;

 cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data;
 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_REMOVE_CMD,
       sizeof(*cmd));

 ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr);
 cmd->vdev_id = cpu_to_le32(arg->vdev_id);
 cmd->tid_mask = cpu_to_le32(arg->peer_tid_bitmap);

 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
     "%s: peer_macaddr %pM vdev_id %d, tid_map %d", __func__,
     arg->peer_macaddr, arg->vdev_id, arg->peer_tid_bitmap);

 ret = ath12k_wmi_cmd_send(wmi, skb,
      WMI_PEER_REORDER_QUEUE_REMOVE_CMDID);
 if (ret) {
  ath12k_warn(ar->ab,
       "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID");
  dev_kfree_skb(skb);
 }

 return ret;
}

int ath12k_wmi_pdev_set_param(struct ath12k *ar, u32 param_id,
         u32 param_value, u8 pdev_id)
{
 struct ath12k_wmi_pdev *wmi = ar->wmi;
 struct wmi_pdev_set_param_cmd *cmd;
 struct sk_buff *skb;
 int ret;

 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
 if (!skb)
  return -ENOMEM;

 cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_PARAM_CMD,
       sizeof(*cmd));
 cmd->pdev_id = cpu_to_le32(pdev_id);
 cmd->param_id = cpu_to_le32(param_id);
 cmd->param_value = cpu_to_le32(param_value);

 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
     "WMI pdev set param %d pdev id %d value %d\n",
     param_id, pdev_id, param_value);

 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID);
 if (ret) {
  ath12k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
  dev_kfree_skb(skb);
 }

 return ret;
}

int ath12k_wmi_pdev_set_ps_mode(struct ath12k *ar, int vdev_id, u32 enable)
{
 struct ath12k_wmi_pdev *wmi = ar->wmi;
 struct wmi_pdev_set_ps_mode_cmd *cmd;
 struct sk_buff *skb;
 int ret;

 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
 if (!skb)
  return -ENOMEM;

 cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data;
 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_MODE_CMD,
       sizeof(*cmd));
 cmd->vdev_id = cpu_to_le32(vdev_id);
 cmd->sta_ps_mode = cpu_to_le32(enable);

 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
     "WMI vdev set psmode %d vdev id %d\n",
     enable, vdev_id);

 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
 if (ret) {
  ath12k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
  dev_kfree_skb(skb);
 }

 return ret;
}

int ath12k_wmi_pdev_suspend(struct ath12k *ar, u32 suspend_opt,
       u32 pdev_id)
{
 struct ath12k_wmi_pdev *wmi = ar->wmi;
 struct wmi_pdev_suspend_cmd *cmd;
 struct sk_buff *skb;
 int ret;

 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
 if (!skb)
  return -ENOMEM;

 cmd = (struct wmi_pdev_suspend_cmd *)skb->data;

 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SUSPEND_CMD,
       sizeof(*cmd));

 cmd->suspend_opt = cpu_to_le32(suspend_opt);
 cmd->pdev_id = cpu_to_le32(pdev_id);

 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
     "WMI pdev suspend pdev_id %d\n", pdev_id);

 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID);
 if (ret) {
  ath12k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n");
  dev_kfree_skb(skb);
 }

 return ret;
}

int ath12k_wmi_pdev_resume(struct ath12k *ar, u32 pdev_id)
{
 struct ath12k_wmi_pdev *wmi = ar->wmi;
 struct wmi_pdev_resume_cmd *cmd;
 struct sk_buff *skb;
 int ret;

 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
 if (!skb)
  return -ENOMEM;

 cmd = (struct wmi_pdev_resume_cmd *)skb->data;

 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_RESUME_CMD,
       sizeof(*cmd));
 cmd->pdev_id = cpu_to_le32(pdev_id);

 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
     "WMI pdev resume pdev id %d\n", pdev_id);

 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID);
 if (ret) {
  ath12k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n");
  dev_kfree_skb(skb);
 }

 return ret;
}

/* TODO FW Support for the cmd is not available yet.
 * Can be tested once the command and corresponding
 * event is implemented in FW
 */

int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar,
       enum wmi_bss_chan_info_req_type type)
{
 struct ath12k_wmi_pdev *wmi = ar->wmi;
 struct wmi_pdev_bss_chan_info_req_cmd *cmd;
 struct sk_buff *skb;
 int ret;

 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
 if (!skb)
  return -ENOMEM;

 cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data;

 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
       sizeof(*cmd));
 cmd->req_type = cpu_to_le32(type);
 cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);

 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
     "WMI bss chan info req type %d\n", type);

 ret = ath12k_wmi_cmd_send(wmi, skb,
      WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID);
 if (ret) {
  ath12k_warn(ar->ab,
       "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");
  dev_kfree_skb(skb);
 }

 return ret;
}

int ath12k_wmi_send_set_ap_ps_param_cmd(struct ath12k *ar, u8 *peer_addr,
     struct ath12k_wmi_ap_ps_arg *arg)
{
 struct ath12k_wmi_pdev *wmi = ar->wmi;
 struct wmi_ap_ps_peer_cmd *cmd;
 struct sk_buff *skb;
 int ret;

 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
 if (!skb)
  return -ENOMEM;

 cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_AP_PS_PEER_CMD,
       sizeof(*cmd));

 cmd->vdev_id = cpu_to_le32(arg->vdev_id);
 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
 cmd->param = cpu_to_le32(arg->param);
 cmd->value = cpu_to_le32(arg->value);

 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
     "WMI set ap ps vdev id %d peer %pM param %d value %d\n",
     arg->vdev_id, peer_addr, arg->param, arg->value);

 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID);
 if (ret) {
  ath12k_warn(ar->ab,
       "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");
  dev_kfree_skb(skb);
 }

 return ret;
}

int ath12k_wmi_set_sta_ps_param(struct ath12k *ar, u32 vdev_id,
    u32 param, u32 param_value)
{
 struct ath12k_wmi_pdev *wmi = ar->wmi;
 struct wmi_sta_powersave_param_cmd *cmd;
 struct sk_buff *skb;
 int ret;

 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
 if (!skb)
  return -ENOMEM;

 cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_PARAM_CMD,
       sizeof(*cmd));

 cmd->vdev_id = cpu_to_le32(vdev_id);
 cmd->param = cpu_to_le32(param);
 cmd->value = cpu_to_le32(param_value);

 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
     "WMI set sta ps vdev_id %d param %d value %d\n",
     vdev_id, param, param_value);

 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
 if (ret) {
  ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID");
  dev_kfree_skb(skb);
 }

 return ret;
}

int ath12k_wmi_force_fw_hang_cmd(struct ath12k *ar, u32 type, u32 delay_time_ms)
{
 struct ath12k_wmi_pdev *wmi = ar->wmi;
 struct wmi_force_fw_hang_cmd *cmd;
 struct sk_buff *skb;
 int ret, len;

 len = sizeof(*cmd);

 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
 if (!skb)
  return -ENOMEM;

 cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FORCE_FW_HANG_CMD,
       len);

 cmd->type = cpu_to_le32(type);
 cmd->delay_time_ms = cpu_to_le32(delay_time_ms);

 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID);

 if (ret) {
  ath12k_warn(ar->ab, "Failed to send WMI_FORCE_FW_HANG_CMDID");
  dev_kfree_skb(skb);
 }
 return ret;
}

int ath12k_wmi_vdev_set_param_cmd(struct ath12k *ar, u32 vdev_id,
      u32 param_id, u32 param_value)
{
 struct ath12k_wmi_pdev *wmi = ar->wmi;
 struct wmi_vdev_set_param_cmd *cmd;
 struct sk_buff *skb;
 int ret;

 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
 if (!skb)
  return -ENOMEM;

 cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_PARAM_CMD,
       sizeof(*cmd));

 cmd->vdev_id = cpu_to_le32(vdev_id);
 cmd->param_id = cpu_to_le32(param_id);
 cmd->param_value = cpu_to_le32(param_value);

 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
     "WMI vdev id 0x%x set param %d value %d\n",
     vdev_id, param_id, param_value);

 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID);
 if (ret) {
  ath12k_warn(ar->ab,
       "failed to send WMI_VDEV_SET_PARAM_CMDID\n");
  dev_kfree_skb(skb);
 }

 return ret;
}

int ath12k_wmi_send_pdev_temperature_cmd(struct ath12k *ar)
{
 struct ath12k_wmi_pdev *wmi = ar->wmi;
 struct wmi_get_pdev_temperature_cmd *cmd;
 struct sk_buff *skb;
 int ret;

 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
 if (!skb)
  return -ENOMEM;

 cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data;
 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_GET_TEMPERATURE_CMD,
       sizeof(*cmd));
 cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);

 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
     "WMI pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id);

 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID);
 if (ret) {
  ath12k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n");
  dev_kfree_skb(skb);
 }

 return ret;
}

int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar,
         u32 vdev_id, u32 bcn_ctrl_op)
{
 struct ath12k_wmi_pdev *wmi = ar->wmi;
 struct wmi_bcn_offload_ctrl_cmd *cmd;
 struct sk_buff *skb;
 int ret;

 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
 if (!skb)
  return -ENOMEM;

 cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data;
 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_OFFLOAD_CTRL_CMD,
       sizeof(*cmd));

 cmd->vdev_id = cpu_to_le32(vdev_id);
 cmd->bcn_ctrl_op = cpu_to_le32(bcn_ctrl_op);

 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
     "WMI bcn ctrl offload vdev id %d ctrl_op %d\n",
     vdev_id, bcn_ctrl_op);

 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID);
 if (ret) {
  ath12k_warn(ar->ab,
       "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n");
  dev_kfree_skb(skb);
 }

 return ret;
}

int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
        const u8 *p2p_ie)
{
 struct ath12k_wmi_pdev *wmi = ar->wmi;
 struct wmi_p2p_go_set_beacon_ie_cmd *cmd;
 size_t p2p_ie_len, aligned_len;
 struct wmi_tlv *tlv;
 struct sk_buff *skb;
 void *ptr;
 int ret, len;

 p2p_ie_len = p2p_ie[1] + 2;
 aligned_len = roundup(p2p_ie_len, sizeof(u32));

 len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;

 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
 if (!skb)
  return -ENOMEM;

 ptr = skb->data;
 cmd = ptr;
 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_P2P_GO_SET_BEACON_IE,
       sizeof(*cmd));
 cmd->vdev_id = cpu_to_le32(vdev_id);
 cmd->ie_buf_len = cpu_to_le32(p2p_ie_len);

 ptr += sizeof(*cmd);
 tlv = ptr;
 tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE,
          aligned_len);
 memcpy(tlv->value, p2p_ie, p2p_ie_len);

 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_P2P_GO_SET_BEACON_IE);
 if (ret) {
  ath12k_warn(ar->ab, "failed to send WMI_P2P_GO_SET_BEACON_IE\n");
  dev_kfree_skb(skb);
 }

 return ret;
}

int ath12k_wmi_bcn_tmpl(struct ath12k_link_vif *arvif,
   struct ieee80211_mutable_offsets *offs,
   struct sk_buff *bcn,
   struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args)
{
 struct ath12k *ar = arvif->ar;
 struct ath12k_wmi_pdev *wmi = ar->wmi;
 struct ath12k_base *ab = ar->ab;
 struct wmi_bcn_tmpl_cmd *cmd;
 struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info;
 struct ath12k_vif *ahvif = arvif->ahvif;
 struct ieee80211_bss_conf *conf;
 u32 vdev_id = arvif->vdev_id;
 struct wmi_tlv *tlv;
 struct sk_buff *skb;
 u32 ema_params = 0;
 void *ptr;
 int ret, len;
 size_t aligned_len = roundup(bcn->len, 4);

 conf = ath12k_mac_get_link_bss_conf(arvif);
 if (!conf) {
  ath12k_warn(ab,
       "unable to access bss link conf in beacon template command for vif %pM link %u\n",
       ahvif->vif->addr, arvif->link_id);
  return -EINVAL;
 }

 len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;

 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
 if (!skb)
  return -ENOMEM;

 cmd = (struct wmi_bcn_tmpl_cmd *)skb->data;
 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_TMPL_CMD,
       sizeof(*cmd));
 cmd->vdev_id = cpu_to_le32(vdev_id);
 cmd->tim_ie_offset = cpu_to_le32(offs->tim_offset);

 if (conf->csa_active) {
  cmd->csa_switch_count_offset =
    cpu_to_le32(offs->cntdwn_counter_offs[0]);
  cmd->ext_csa_switch_count_offset =
    cpu_to_le32(offs->cntdwn_counter_offs[1]);
  cmd->csa_event_bitmap = cpu_to_le32(0xFFFFFFFF);
  arvif->current_cntdown_counter = bcn->data[offs->cntdwn_counter_offs[0]];
 }

 cmd->buf_len = cpu_to_le32(bcn->len);
 cmd->mbssid_ie_offset = cpu_to_le32(offs->mbssid_off);
 if (ema_args) {
  u32p_replace_bits(&ema_params, ema_args->bcn_cnt, WMI_EMA_BEACON_CNT);
  u32p_replace_bits(&ema_params, ema_args->bcn_index, WMI_EMA_BEACON_IDX);
  if (ema_args->bcn_index == 0)
   u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_FIRST);
  if (ema_args->bcn_index + 1 == ema_args->bcn_cnt)
   u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_LAST);
  cmd->ema_params = cpu_to_le32(ema_params);
 }
 cmd->feature_enable_bitmap =
  cpu_to_le32(u32_encode_bits(arvif->beacon_prot,
         WMI_BEACON_PROTECTION_EN_BIT));

 ptr = skb->data + sizeof(*cmd);

 bcn_prb_info = ptr;
 len = sizeof(*bcn_prb_info);
 bcn_prb_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
         len);
 bcn_prb_info->caps = 0;
 bcn_prb_info->erp = 0;

 ptr += sizeof(*bcn_prb_info);

 tlv = ptr;
 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
 memcpy(tlv->value, bcn->data, bcn->len);

 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
 if (ret) {
  ath12k_warn(ab, "failed to send WMI_BCN_TMPL_CMDID\n");
  dev_kfree_skb(skb);
 }

 return ret;
}

int ath12k_wmi_vdev_install_key(struct ath12k *ar,
    struct wmi_vdev_install_key_arg *arg)
{
 struct ath12k_wmi_pdev *wmi = ar->wmi;
 struct wmi_vdev_install_key_cmd *cmd;
 struct wmi_tlv *tlv;
 struct sk_buff *skb;
 int ret, len, key_len_aligned;

 /* WMI_TAG_ARRAY_BYTE needs to be aligned with 4, the actual key
 * length is specified in cmd->key_len.
 */

 key_len_aligned = roundup(arg->key_len, 4);

 len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;

 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
 if (!skb)
  return -ENOMEM;

 cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_INSTALL_KEY_CMD,
       sizeof(*cmd));
 cmd->vdev_id = cpu_to_le32(arg->vdev_id);
 ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
 cmd->key_idx = cpu_to_le32(arg->key_idx);
 cmd->key_flags = cpu_to_le32(arg->key_flags);
 cmd->key_cipher = cpu_to_le32(arg->key_cipher);
 cmd->key_len = cpu_to_le32(arg->key_len);
 cmd->key_txmic_len = cpu_to_le32(arg->key_txmic_len);
 cmd->key_rxmic_len = cpu_to_le32(arg->key_rxmic_len);

 if (arg->key_rsc_counter)
  cmd->key_rsc_counter = cpu_to_le64(arg->key_rsc_counter);

 tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, key_len_aligned);
 memcpy(tlv->value, arg->key_data, arg->key_len);

 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
     "WMI vdev install key idx %d cipher %d len %d\n",
     arg->key_idx, arg->key_cipher, arg->key_len);

 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
 if (ret) {
  ath12k_warn(ar->ab,
       "failed to send WMI_VDEV_INSTALL_KEY cmd\n");
  dev_kfree_skb(skb);
 }

 return ret;
}

static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
           struct ath12k_wmi_peer_assoc_arg *arg,
           bool hw_crypto_disabled)
{
 cmd->peer_flags = 0;
 cmd->peer_flags_ext = 0;

 if (arg->is_wme_set) {
--> --------------------

--> maximum size reached

--> --------------------

Messung V0.5
C=98 H=94 G=95

¤ Dauer der Verarbeitung: 0.9 Sekunden  (vorverarbeitet)  ¤

*© Formatika GbR, Deutschland






Wurzel

Suchen

Beweissystem der NASA

Beweissystem Isabelle

NIST Cobol Testsuite

Cephes Mathematical Library

Wiener Entwicklungsmethode

Haftungshinweis

Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten Informationen zugesichert.

Bemerkung:

Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.