Diffstat (limited to 'drivers/net/wireless/ath')
93 files changed, 54436 insertions, 506 deletions
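A recurring technique in the ath11k changes below is the IPQ5018 Copy Engine register remap: on that chip the CE registers live outside the WCSS block, so the probe path ioremaps them separately into ab->mem_ce, and HAL/AHB code addresses CE registers as offsets from that mapping via ATH11K_CE_OFFSET(ab) (defined as ab->mem_ce - ab->mem, zero when no remap is needed). The following is a minimal, self-contained sketch of that scheme; the demo_* names are illustrative stand-ins for this note, not the driver's own symbols.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-ins for the driver state: mem is the primary
 * ioremapped register space, mem_ce the CE space (equal to mem unless
 * the hardware needs a separate mapping, as IPQ5018 does).
 */
struct demo_base {
        uint8_t *mem;    /* primary register mapping */
        uint8_t *mem_ce; /* CE register mapping; == mem if not remapped */
};

/* Mirrors ATH11K_CE_OFFSET(ab): the distance between the CE mapping and
 * the primary mapping. pci.c and ahb.c set mem_ce = mem by default, so
 * this is zero on chips without a separate CE space.
 */
static inline ptrdiff_t demo_ce_offset(struct demo_base *ab)
{
        return ab->mem_ce - ab->mem;
}

/* A CE register access then becomes base + reg + offset, which lands in
 * mem_ce + reg; the same code path works whether or not CE registers
 * were remapped.
 */
static inline uint32_t demo_ce_read32(struct demo_base *ab, size_t reg)
{
        volatile uint32_t *addr =
                (volatile uint32_t *)(ab->mem + reg + demo_ce_offset(ab));
        return *addr;
}

Note how the series pre-biases the IPQ5018 register constants for this: ath11k_ce_ie_addr_ipq5018 and the hal_seq_wcss_umac_ce*_reg values subtract HAL_IPQ5018_CE_WFSS_REG_BASE, turning absolute addresses into offsets into the separate ioremap described by ath11k_ce_remap_ipq5018.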
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig index d88edbf1bea3..910c10028b14 100644 --- a/drivers/net/wireless/ath/Kconfig +++ b/drivers/net/wireless/ath/Kconfig @@ -63,5 +63,6 @@ source "drivers/net/wireless/ath/wil6210/Kconfig" source "drivers/net/wireless/ath/ath10k/Kconfig" source "drivers/net/wireless/ath/wcn36xx/Kconfig" source "drivers/net/wireless/ath/ath11k/Kconfig" +source "drivers/net/wireless/ath/ath12k/Kconfig" endif diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile index 8e4ae9de5ae4..8d6e6e218d24 100644 --- a/drivers/net/wireless/ath/Makefile +++ b/drivers/net/wireless/ath/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_WIL6210) += wil6210/ obj-$(CONFIG_ATH10K) += ath10k/ obj-$(CONFIG_WCN36XX) += wcn36xx/ obj-$(CONFIG_ATH11K) += ath11k/ +obj-$(CONFIG_ATH12K) += ath12k/ obj-$(CONFIG_ATH_COMMON) += ath.o diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index 59926227bd49..c2f3bd35c392 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -208,14 +208,6 @@ ath10k_ce_shadow_src_ring_write_index_set(struct ath10k *ar, ath10k_ce_write32(ar, shadow_sr_wr_ind_addr(ar, ce_state), value); } -static inline void -ath10k_ce_shadow_dest_ring_write_index_set(struct ath10k *ar, - struct ath10k_ce_pipe *ce_state, - unsigned int value) -{ - ath10k_ce_write32(ar, shadow_dst_wr_ind_addr(ar, ce_state), value); -} - static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar, u32 ce_id, u64 addr) diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c index d34a4d6325b2..cd48eca494ed 100644 --- a/drivers/net/wireless/ath/ath11k/ahb.c +++ b/drivers/net/wireless/ath/ath11k/ahb.c @@ -32,6 +32,9 @@ static const struct of_device_id ath11k_ahb_of_match[] = { { .compatible = "qcom,wcn6750-wifi", .data = (void *)ATH11K_HW_WCN6750_HW10, }, + { .compatible = "qcom,ipq5018-wifi", + .data = (void *)ATH11K_HW_IPQ5018_HW10, + }, { } }; @@ -267,30 +270,42 @@ static void ath11k_ahb_clearbit32(struct ath11k_base *ab, u8 bit, u32 offset) static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id) { const struct ce_attr *ce_attr; + const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr; + u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr; + + ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab); + ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab); + ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab); ce_attr = &ab->hw_params.host_ce_config[ce_id]; if (ce_attr->src_nentries) - ath11k_ahb_setbit32(ab, ce_id, CE_HOST_IE_ADDRESS); + ath11k_ahb_setbit32(ab, ce_id, ie1_reg_addr); if (ce_attr->dest_nentries) { - ath11k_ahb_setbit32(ab, ce_id, CE_HOST_IE_2_ADDRESS); + ath11k_ahb_setbit32(ab, ce_id, ie2_reg_addr); ath11k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT, - CE_HOST_IE_3_ADDRESS); + ie3_reg_addr); } } static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id) { const struct ce_attr *ce_attr; + const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr; + u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr; + + ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab); + ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab); + ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab); ce_attr = &ab->hw_params.host_ce_config[ce_id]; if (ce_attr->src_nentries) - ath11k_ahb_clearbit32(ab, ce_id, CE_HOST_IE_ADDRESS); + ath11k_ahb_clearbit32(ab, ce_id, 
ie1_reg_addr); if (ce_attr->dest_nentries) { - ath11k_ahb_clearbit32(ab, ce_id, CE_HOST_IE_2_ADDRESS); + ath11k_ahb_clearbit32(ab, ce_id, ie2_reg_addr); ath11k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT, - CE_HOST_IE_3_ADDRESS); + ie3_reg_addr); } } @@ -1150,6 +1165,22 @@ static int ath11k_ahb_probe(struct platform_device *pdev) if (ret) goto err_core_free; + ab->mem_ce = ab->mem; + + if (ab->hw_params.ce_remap) { + const struct ce_remap *ce_remap = ab->hw_params.ce_remap; + /* ce register space is moved out of wcss unlike ipq8074 or ipq6018 + * and the space is not contiguous, hence remapping the CE registers + * to a new space for accessing them. + */ + ab->mem_ce = ioremap(ce_remap->base, ce_remap->size); + if (IS_ERR(ab->mem_ce)) { + dev_err(&pdev->dev, "ce ioremap error\n"); + ret = -ENOMEM; + goto err_core_free; + } + } + ret = ath11k_ahb_fw_resources_init(ab); if (ret) goto err_core_free; @@ -1236,6 +1267,10 @@ static void ath11k_ahb_free_resources(struct ath11k_base *ab) ath11k_ahb_release_smp2p_handle(ab); ath11k_ahb_fw_resource_deinit(ab); ath11k_ce_free_pipes(ab); + + if (ab->hw_params.ce_remap) + iounmap(ab->mem_ce); + ath11k_core_free(ab); platform_set_drvdata(pdev, NULL); } diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h index 9644ff909502..1fc6360e7f01 100644 --- a/drivers/net/wireless/ath/ath11k/ce.h +++ b/drivers/net/wireless/ath/ath11k/ce.h @@ -49,6 +49,11 @@ void ath11k_ce_byte_swap(void *mem, u32 len); #define CE_HOST_IE_2_ADDRESS 0x00A18040 #define CE_HOST_IE_3_ADDRESS CE_HOST_IE_ADDRESS +/* CE IE registers are different for IPQ5018 */ +#define CE_HOST_IPQ5018_IE_ADDRESS 0x0841804C +#define CE_HOST_IPQ5018_IE_2_ADDRESS 0x08418050 +#define CE_HOST_IPQ5018_IE_3_ADDRESS CE_HOST_IPQ5018_IE_ADDRESS + #define CE_HOST_IE_3_SHIFT 0xC #define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask)) @@ -84,6 +89,17 @@ struct ce_pipe_config { __le32 reserved; }; +struct ce_ie_addr { + u32 ie1_reg_addr; + u32 ie2_reg_addr; + u32 ie3_reg_addr; +}; + +struct ce_remap { + u32 base; + u32 size; +}; + struct ce_attr { /* CE_ATTR_* values */ unsigned int flags; diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index edf78df9b12f..75fdbe4ef83a 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -54,6 +54,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .target_ce_count = 11, .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq8074, .svc_to_ce_map_len = 21, + .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074, .single_pdev_only = false, .rxdma1_enable = true, .num_rxmda_per_pdev = 1, @@ -115,6 +116,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .tcl_ring_retry = true, .tx_ring_size = DP_TCL_DATA_RING_SIZE, .smp2p_wow_exit = false, + .ftm_responder = true, }, { .hw_rev = ATH11K_HW_IPQ6018_HW10, @@ -137,6 +139,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .target_ce_count = 11, .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq6018, .svc_to_ce_map_len = 19, + .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074, .single_pdev_only = false, .rxdma1_enable = true, .num_rxmda_per_pdev = 1, @@ -196,6 +199,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .tx_ring_size = DP_TCL_DATA_RING_SIZE, .smp2p_wow_exit = false, .support_fw_mac_sequence = false, + .ftm_responder = true, }, { .name = "qca6390 hw2.0", @@ -218,6 +222,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { 
.target_ce_count = 9, .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390, .svc_to_ce_map_len = 14, + .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074, .single_pdev_only = true, .rxdma1_enable = false, .num_rxmda_per_pdev = 2, @@ -279,6 +284,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .tx_ring_size = DP_TCL_DATA_RING_SIZE, .smp2p_wow_exit = false, .support_fw_mac_sequence = true, + .ftm_responder = false, }, { .name = "qcn9074 hw1.0", @@ -301,6 +307,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .target_ce_count = 9, .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qcn9074, .svc_to_ce_map_len = 18, + .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074, .rxdma1_enable = true, .num_rxmda_per_pdev = 1, .rx_mac_buf_ring = false, @@ -359,6 +366,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .tx_ring_size = DP_TCL_DATA_RING_SIZE, .smp2p_wow_exit = false, .support_fw_mac_sequence = false, + .ftm_responder = true, }, { .name = "wcn6855 hw2.0", @@ -381,6 +389,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .target_ce_count = 9, .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390, .svc_to_ce_map_len = 14, + .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074, .single_pdev_only = true, .rxdma1_enable = false, .num_rxmda_per_pdev = 2, @@ -442,6 +451,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .tx_ring_size = DP_TCL_DATA_RING_SIZE, .smp2p_wow_exit = false, .support_fw_mac_sequence = true, + .ftm_responder = false, }, { .name = "wcn6855 hw2.1", @@ -524,6 +534,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .tx_ring_size = DP_TCL_DATA_RING_SIZE, .smp2p_wow_exit = false, .support_fw_mac_sequence = true, + .ftm_responder = false, }, { .name = "wcn6750 hw1.0", @@ -546,6 +557,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .target_ce_count = 9, .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390, .svc_to_ce_map_len = 14, + .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074, .single_pdev_only = true, .rxdma1_enable = false, .num_rxmda_per_pdev = 1, @@ -603,6 +615,87 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .tx_ring_size = DP_TCL_DATA_RING_SIZE_WCN6750, .smp2p_wow_exit = true, .support_fw_mac_sequence = true, + .ftm_responder = false, + }, + { + .hw_rev = ATH11K_HW_IPQ5018_HW10, + .name = "ipq5018 hw1.0", + .fw = { + .dir = "IPQ5018/hw1.0", + .board_size = 256 * 1024, + .cal_offset = 128 * 1024, + }, + .max_radios = MAX_RADIOS_5018, + .bdf_addr = 0x4BA00000, + /* hal_desc_sz and hw ops are similar to qcn9074 */ + .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074), + .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074, + .ring_mask = &ath11k_hw_ring_mask_ipq8074, + .credit_flow = false, + .max_tx_ring = 1, + .spectral = { + .fft_sz = 2, + .fft_pad_sz = 0, + .summary_pad_sz = 16, + .fft_hdr_len = 24, + .max_fft_bins = 1024, + }, + .internal_sleep_clock = false, + .regs = &ipq5018_regs, + .hw_ops = &ipq5018_ops, + .host_ce_config = ath11k_host_ce_config_qcn9074, + .ce_count = CE_CNT_5018, + .target_ce_config = ath11k_target_ce_config_wlan_ipq5018, + .target_ce_count = TARGET_CE_CNT_5018, + .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq5018, + .svc_to_ce_map_len = SVC_CE_MAP_LEN_5018, + .ce_ie_addr = &ath11k_ce_ie_addr_ipq5018, + .ce_remap = &ath11k_ce_remap_ipq5018, + .rxdma1_enable = true, + .num_rxmda_per_pdev = RXDMA_PER_PDEV_5018, + .rx_mac_buf_ring = false, + .vdev_start_delay = false, + .htt_peer_map_v2 = true, + .interface_modes = 
BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_MESH_POINT), + .supports_monitor = false, + .supports_sta_ps = false, + .supports_shadow_regs = false, + .fw_mem_mode = 0, + .num_vdevs = 16 + 1, + .num_peers = 512, + .supports_regdb = false, + .idle_ps = false, + .supports_suspend = false, + .hal_params = &ath11k_hw_hal_params_ipq8074, + .single_pdev_only = false, + .cold_boot_calib = true, + .fix_l1ss = true, + .supports_dynamic_smps_6ghz = false, + .alloc_cacheable_memory = true, + .supports_rssi_stats = false, + .fw_wmi_diag_event = false, + .current_cc_support = false, + .dbr_debug_support = true, + .global_reset = false, + .bios_sar_capa = NULL, + .m3_fw_support = false, + .fixed_bdf_addr = true, + .fixed_mem_region = true, + .static_window_map = false, + .hybrid_bus_type = false, + .fixed_fw_mem = false, + .support_off_channel_tx = false, + .supports_multi_bssid = false, + + .sram_dump = {}, + + .tcl_ring_retry = true, + .tx_ring_size = DP_TCL_DATA_RING_SIZE, + .smp2p_wow_exit = false, + .support_fw_mac_sequence = false, + .ftm_responder = true, }, }; diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index 22460b0abf03..0830276e5028 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -142,6 +142,7 @@ enum ath11k_hw_rev { ATH11K_HW_WCN6855_HW20, ATH11K_HW_WCN6855_HW21, ATH11K_HW_WCN6750_HW10, + ATH11K_HW_IPQ5018_HW10, }; enum ath11k_firmware_mode { @@ -230,6 +231,13 @@ struct ath11k_he { #define MAX_RADIOS 3 +/* ipq5018 hw param macros */ +#define MAX_RADIOS_5018 1 +#define CE_CNT_5018 6 +#define TARGET_CE_CNT_5018 9 +#define SVC_CE_MAP_LEN_5018 17 +#define RXDMA_PER_PDEV_5018 1 + enum { WMI_HOST_TP_SCALE_MAX = 0, WMI_HOST_TP_SCALE_50 = 1, @@ -338,6 +346,7 @@ struct ath11k_vif { bool is_started; bool is_up; + bool ftm_responder; bool spectral_enabled; bool ps; u32 aid; @@ -512,8 +521,8 @@ struct ath11k_sta { #define ATH11K_MIN_5G_FREQ 4150 #define ATH11K_MIN_6G_FREQ 5925 #define ATH11K_MAX_6G_FREQ 7115 -#define ATH11K_NUM_CHANS 101 -#define ATH11K_MAX_5G_CHAN 173 +#define ATH11K_NUM_CHANS 102 +#define ATH11K_MAX_5G_CHAN 177 enum ath11k_state { ATH11K_STATE_OFF, @@ -843,6 +852,7 @@ struct ath11k_base { struct ath11k_dp dp; void __iomem *mem; + void __iomem *mem_ce; unsigned long mem_len; struct { @@ -912,7 +922,6 @@ struct ath11k_base { enum ath11k_dfs_region dfs_region; #ifdef CONFIG_ATH11K_DEBUGFS struct dentry *debugfs_soc; - struct dentry *debugfs_ath11k; #endif struct ath11k_soc_dp_stats soc_stats; @@ -1138,6 +1147,9 @@ extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq6018 extern const struct ce_pipe_config ath11k_target_ce_config_wlan_qca6390[]; extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qca6390[]; +extern const struct ce_pipe_config ath11k_target_ce_config_wlan_ipq5018[]; +extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq5018[]; + extern const struct ce_pipe_config ath11k_target_ce_config_wlan_qcn9074[]; extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qcn9074[]; int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab); diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c index ccdf3d5ba1ab..5bb6fd17fdf6 100644 --- a/drivers/net/wireless/ath/ath11k/debugfs.c +++ b/drivers/net/wireless/ath/ath11k/debugfs.c @@ -976,10 +976,6 @@ int ath11k_debugfs_pdev_create(struct ath11k_base *ab) if 
(test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) return 0; - ab->debugfs_soc = debugfs_create_dir(ab->hw_params.name, ab->debugfs_ath11k); - if (IS_ERR(ab->debugfs_soc)) - return PTR_ERR(ab->debugfs_soc); - debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab, &fops_simulate_fw_crash); @@ -1001,15 +997,51 @@ void ath11k_debugfs_pdev_destroy(struct ath11k_base *ab) int ath11k_debugfs_soc_create(struct ath11k_base *ab) { - ab->debugfs_ath11k = debugfs_create_dir("ath11k", NULL); + struct dentry *root; + bool dput_needed; + char name[64]; + int ret; + + root = debugfs_lookup("ath11k", NULL); + if (!root) { + root = debugfs_create_dir("ath11k", NULL); + if (IS_ERR_OR_NULL(root)) + return PTR_ERR(root); + + dput_needed = false; + } else { + /* a dentry from lookup() needs dput() after we don't use it */ + dput_needed = true; + } + + scnprintf(name, sizeof(name), "%s-%s", ath11k_bus_str(ab->hif.bus), + dev_name(ab->dev)); + + ab->debugfs_soc = debugfs_create_dir(name, root); + if (IS_ERR_OR_NULL(ab->debugfs_soc)) { + ret = PTR_ERR(ab->debugfs_soc); + goto out; + } + + ret = 0; - return PTR_ERR_OR_ZERO(ab->debugfs_ath11k); +out: + if (dput_needed) + dput(root); + + return ret; } void ath11k_debugfs_soc_destroy(struct ath11k_base *ab) { - debugfs_remove_recursive(ab->debugfs_ath11k); - ab->debugfs_ath11k = NULL; + debugfs_remove_recursive(ab->debugfs_soc); + ab->debugfs_soc = NULL; + + /* We are not removing ath11k directory on purpose, even if it + * would be empty. This simplifies the directory handling and it's + * a minor cosmetic issue to leave an empty ath11k directory to + * debugfs. + */ } EXPORT_SYMBOL(ath11k_debugfs_soc_destroy); diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c index c5a4c34d7749..b65a84a88264 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.c +++ b/drivers/net/wireless/ath/ath11k/dp_rx.c @@ -1535,13 +1535,12 @@ struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar, { struct htt_ppdu_stats_info *ppdu_info; - spin_lock_bh(&ar->data_lock); + lockdep_assert_held(&ar->data_lock); + if (!list_empty(&ar->ppdu_stats_info)) { list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) { - if (ppdu_info->ppdu_id == ppdu_id) { - spin_unlock_bh(&ar->data_lock); + if (ppdu_info->ppdu_id == ppdu_id) return ppdu_info; - } } if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) { @@ -1553,16 +1552,13 @@ struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar, kfree(ppdu_info); } } - spin_unlock_bh(&ar->data_lock); ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC); if (!ppdu_info) return NULL; - spin_lock_bh(&ar->data_lock); list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info); ar->ppdu_stat_list_depth++; - spin_unlock_bh(&ar->data_lock); return ppdu_info; } @@ -1586,16 +1582,17 @@ static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab, ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id); if (!ar) { ret = -EINVAL; - goto exit; + goto out; } if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) trace_ath11k_htt_ppdu_stats(ar, skb->data, len); + spin_lock_bh(&ar->data_lock); ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id); if (!ppdu_info) { ret = -EINVAL; - goto exit; + goto out_unlock_data; } ppdu_info->ppdu_id = ppdu_id; @@ -1604,10 +1601,13 @@ static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab, (void *)ppdu_info); if (ret) { ath11k_warn(ab, "Failed to parse tlv %d\n", ret); - goto exit; + goto out_unlock_data; } -exit: +out_unlock_data: + 
spin_unlock_bh(&ar->data_lock); + +out: rcu_read_unlock(); return ret; @@ -3126,6 +3126,7 @@ int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id if (!peer) { ath11k_warn(ab, "failed to find the peer to set up fragment info\n"); spin_unlock_bh(&ab->base_lock); + crypto_free_shash(tfm); return -ENOENT; } @@ -5022,6 +5023,7 @@ static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id, } else { rxs->flag |= RX_FLAG_ALLOW_SAME_PN; } + rxs->flag |= RX_FLAG_ONLY_MONITOR; ath11k_update_radiotap(ar, ppduinfo, mon_skb, rxs); ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs); diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c index 2fd224480d45..22422237500c 100644 --- a/drivers/net/wireless/ath/ath11k/hal.c +++ b/drivers/net/wireless/ath/ath11k/hal.c @@ -1220,16 +1220,20 @@ static int ath11k_hal_srng_create_config(struct ath11k_base *ab) s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP; s = &hal->srng_config[HAL_CE_SRC]; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB; - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB + + ATH11K_CE_OFFSET(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP + + ATH11K_CE_OFFSET(ab); s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) - HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab); s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) - HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab); s = &hal->srng_config[HAL_CE_DST]; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB; - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB + + ATH11K_CE_OFFSET(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP + + ATH11K_CE_OFFSET(ab); s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - @@ -1237,8 +1241,9 @@ static int ath11k_hal_srng_create_config(struct ath11k_base *ab) s = &hal->srng_config[HAL_CE_DST_STATUS]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + - HAL_CE_DST_STATUS_RING_BASE_LSB; - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP; + HAL_CE_DST_STATUS_RING_BASE_LSB + ATH11K_CE_OFFSET(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP + + ATH11K_CE_OFFSET(ab); s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h index 6a1f78ee6eb6..1942d41d6de5 100644 --- a/drivers/net/wireless/ath/ath11k/hal.h +++ b/drivers/net/wireless/ath/ath11k/hal.h @@ -321,6 +321,10 @@ struct ath11k_base; #define HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE 0x000fffff #define HAL_RXDMA_RING_MAX_SIZE 0x0000ffff +/* IPQ5018 ce registers */ +#define HAL_IPQ5018_CE_WFSS_REG_BASE 0x08400000 +#define HAL_IPQ5018_CE_SIZE 0x200000 + /* Add any other errors here and return them in * ath11k_hal_rx_desc_get_err(). 
*/ @@ -519,6 +523,7 @@ enum hal_srng_dir { #define HAL_SRNG_FLAGS_MSI_INTR 0x00020000 #define HAL_SRNG_FLAGS_CACHED 0x20000000 #define HAL_SRNG_FLAGS_LMAC_RING 0x80000000 +#define HAL_SRNG_FLAGS_REMAP_CE_RING 0x10000000 #define HAL_SRNG_TLV_HDR_TAG GENMASK(9, 1) #define HAL_SRNG_TLV_HDR_LEN GENMASK(25, 10) diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c index dbcc0c4035b6..ab8f0ccacc6b 100644 --- a/drivers/net/wireless/ath/ath11k/hw.c +++ b/drivers/net/wireless/ath/ath11k/hw.c @@ -791,6 +791,49 @@ static void ath11k_hw_wcn6855_reo_setup(struct ath11k_base *ab) ring_hash_map); } +static void ath11k_hw_ipq5018_reo_setup(struct ath11k_base *ab) +{ + u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG; + u32 val; + + /* Each hash entry uses three bits to map to a particular ring. */ + u32 ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 | + HAL_HASH_ROUTING_RING_SW2 << 4 | + HAL_HASH_ROUTING_RING_SW3 << 8 | + HAL_HASH_ROUTING_RING_SW4 << 12 | + HAL_HASH_ROUTING_RING_SW1 << 16 | + HAL_HASH_ROUTING_RING_SW2 << 20 | + HAL_HASH_ROUTING_RING_SW3 << 24 | + HAL_HASH_ROUTING_RING_SW4 << 28; + + val = ath11k_hif_read32(ab, reo_base + HAL_REO1_GEN_ENABLE); + + val &= ~HAL_REO1_GEN_ENABLE_FRAG_DST_RING; + val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_FRAG_DST_RING, + HAL_SRNG_RING_ID_REO2SW1) | + FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) | + FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1); + ath11k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val); + + ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0(ab), + HAL_DEFAULT_REO_TIMEOUT_USEC); + ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1(ab), + HAL_DEFAULT_REO_TIMEOUT_USEC); + ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2(ab), + HAL_DEFAULT_REO_TIMEOUT_USEC); + ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3(ab), + HAL_DEFAULT_REO_TIMEOUT_USEC); + + ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0, + ring_hash_map); + ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_1, + ring_hash_map); + ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2, + ring_hash_map); + ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3, + ring_hash_map); +} + static u16 ath11k_hw_ipq8074_mpdu_info_get_peerid(u8 *tlv_data) { u16 peer_id = 0; @@ -1084,6 +1127,47 @@ const struct ath11k_hw_ops wcn6750_ops = { .get_ring_selector = ath11k_hw_wcn6750_get_tcl_ring_selector, }; +/* IPQ5018 hw ops is similar to QCN9074 except for the dest ring remap */ +const struct ath11k_hw_ops ipq5018_ops = { + .get_hw_mac_from_pdev_id = ath11k_hw_ipq6018_mac_from_pdev_id, + .wmi_init_config = ath11k_init_wmi_config_ipq8074, + .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074, + .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074, + .tx_mesh_enable = ath11k_hw_qcn9074_tx_mesh_enable, + .rx_desc_get_first_msdu = ath11k_hw_qcn9074_rx_desc_get_first_msdu, + .rx_desc_get_last_msdu = ath11k_hw_qcn9074_rx_desc_get_last_msdu, + .rx_desc_get_l3_pad_bytes = ath11k_hw_qcn9074_rx_desc_get_l3_pad_bytes, + .rx_desc_get_hdr_status = ath11k_hw_qcn9074_rx_desc_get_hdr_status, + .rx_desc_encrypt_valid = ath11k_hw_qcn9074_rx_desc_encrypt_valid, + .rx_desc_get_encrypt_type = ath11k_hw_qcn9074_rx_desc_get_encrypt_type, + .rx_desc_get_decap_type = ath11k_hw_qcn9074_rx_desc_get_decap_type, + .rx_desc_get_mesh_ctl = ath11k_hw_qcn9074_rx_desc_get_mesh_ctl, + .rx_desc_get_ldpc_support = ath11k_hw_qcn9074_rx_desc_get_ldpc_support, + .rx_desc_get_mpdu_seq_ctl_vld = 
ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld, + .rx_desc_get_mpdu_fc_valid = ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid, + .rx_desc_get_mpdu_start_seq_no = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no, + .rx_desc_get_msdu_len = ath11k_hw_qcn9074_rx_desc_get_msdu_len, + .rx_desc_get_msdu_sgi = ath11k_hw_qcn9074_rx_desc_get_msdu_sgi, + .rx_desc_get_msdu_rate_mcs = ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs, + .rx_desc_get_msdu_rx_bw = ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw, + .rx_desc_get_msdu_freq = ath11k_hw_qcn9074_rx_desc_get_msdu_freq, + .rx_desc_get_msdu_pkt_type = ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type, + .rx_desc_get_msdu_nss = ath11k_hw_qcn9074_rx_desc_get_msdu_nss, + .rx_desc_get_mpdu_tid = ath11k_hw_qcn9074_rx_desc_get_mpdu_tid, + .rx_desc_get_mpdu_peer_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id, + .rx_desc_copy_attn_end_tlv = ath11k_hw_qcn9074_rx_desc_copy_attn_end, + .rx_desc_get_mpdu_start_tag = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag, + .rx_desc_get_mpdu_ppdu_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id, + .rx_desc_set_msdu_len = ath11k_hw_qcn9074_rx_desc_set_msdu_len, + .rx_desc_get_attention = ath11k_hw_qcn9074_rx_desc_get_attention, + .reo_setup = ath11k_hw_ipq5018_reo_setup, + .rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload, + .mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid, + .rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid, + .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2, + +}; + #define ATH11K_TX_RING_MASK_0 BIT(0) #define ATH11K_TX_RING_MASK_1 BIT(1) #define ATH11K_TX_RING_MASK_2 BIT(2) @@ -1972,6 +2056,214 @@ const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_wcn6750 = { }, }; +/* Target firmware's Copy Engine configuration for IPQ5018 */ +const struct ce_pipe_config ath11k_target_ce_config_wlan_ipq5018[] = { + /* CE0: host->target HTC control and raw streams */ + { + .pipenum = __cpu_to_le32(0), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE1: target->host HTT + HTC control */ + { + .pipenum = __cpu_to_le32(1), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE2: target->host WMI */ + { + .pipenum = __cpu_to_le32(2), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE3: host->target WMI */ + { + .pipenum = __cpu_to_le32(3), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE4: host->target HTT */ + { + .pipenum = __cpu_to_le32(4), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(256), + .nbytes_max = __cpu_to_le32(256), + .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR), + .reserved = __cpu_to_le32(0), + }, + + /* CE5: target->host Pktlog */ + { + .pipenum = __cpu_to_le32(5), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE6: Reserved for target autonomous hif_memcpy */ + { + .pipenum = 
__cpu_to_le32(6), + .pipedir = __cpu_to_le32(PIPEDIR_INOUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(16384), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE7 used only by Host */ + { + .pipenum = __cpu_to_le32(7), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(0x2000), + .reserved = __cpu_to_le32(0), + }, + + /* CE8 target->host used only by IPA */ + { + .pipenum = __cpu_to_le32(8), + .pipedir = __cpu_to_le32(PIPEDIR_INOUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(16384), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, +}; + +/* Map from service/endpoint to Copy Engine for IPQ5018. + * This table is derived from the CE TABLE, above. + * It is passed to the Target at startup for use by firmware. + */ +const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq5018[] = { + { + .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + .pipenum = __cpu_to_le32(3), + }, + { + .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO), + .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + .pipenum = __cpu_to_le32(2), + }, + { + .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + .pipenum = __cpu_to_le32(3), + }, + { + .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK), + .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + .pipenum = __cpu_to_le32(2), + }, + { + .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + .pipenum = __cpu_to_le32(3), + }, + { + .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE), + .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + .pipenum = __cpu_to_le32(2), + }, + { + .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + .pipenum = __cpu_to_le32(3), + }, + { + .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI), + .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + .pipenum = __cpu_to_le32(2), + }, + { + .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + .pipenum = __cpu_to_le32(3), + }, + { + .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL), + .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + .pipenum = __cpu_to_le32(2), + }, + + { + .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + .pipenum = __cpu_to_le32(0), + }, + { + .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL), + .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + .pipenum = __cpu_to_le32(1), + }, + + { + .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + .pipenum = __cpu_to_le32(0), + }, + { + .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS), + .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + .pipenum = __cpu_to_le32(1), + }, + { + .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* 
out = UL = host -> target */ + .pipenum = __cpu_to_le32(4), + }, + { + .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG), + .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + .pipenum = __cpu_to_le32(1), + }, + { + .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_PKT_LOG), + .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + .pipenum = __cpu_to_le32(5), + }, + + /* (Additions here) */ + + { /* terminator entry */ } +}; + +const struct ce_ie_addr ath11k_ce_ie_addr_ipq8074 = { + .ie1_reg_addr = CE_HOST_IE_ADDRESS, + .ie2_reg_addr = CE_HOST_IE_2_ADDRESS, + .ie3_reg_addr = CE_HOST_IE_3_ADDRESS, +}; + +const struct ce_ie_addr ath11k_ce_ie_addr_ipq5018 = { + .ie1_reg_addr = CE_HOST_IPQ5018_IE_ADDRESS - HAL_IPQ5018_CE_WFSS_REG_BASE, + .ie2_reg_addr = CE_HOST_IPQ5018_IE_2_ADDRESS - HAL_IPQ5018_CE_WFSS_REG_BASE, + .ie3_reg_addr = CE_HOST_IPQ5018_IE_3_ADDRESS - HAL_IPQ5018_CE_WFSS_REG_BASE, +}; + +const struct ce_remap ath11k_ce_remap_ipq5018 = { + .base = HAL_IPQ5018_CE_WFSS_REG_BASE, + .size = HAL_IPQ5018_CE_SIZE, +}; + const struct ath11k_hw_regs ipq8074_regs = { /* SW2TCL(x) R0 ring configuration address */ .hal_tcl1_ring_base_lsb = 0x00000510, @@ -2437,6 +2729,85 @@ static const struct ath11k_hw_tcl2wbm_rbm_map ath11k_hw_tcl2wbm_rbm_map_wcn6750[ }, }; +const struct ath11k_hw_regs ipq5018_regs = { + /* SW2TCL(x) R0 ring configuration address */ + .hal_tcl1_ring_base_lsb = 0x00000694, + .hal_tcl1_ring_base_msb = 0x00000698, + .hal_tcl1_ring_id = 0x0000069c, + .hal_tcl1_ring_misc = 0x000006a4, + .hal_tcl1_ring_tp_addr_lsb = 0x000006b0, + .hal_tcl1_ring_tp_addr_msb = 0x000006b4, + .hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006c4, + .hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006c8, + .hal_tcl1_ring_msi1_base_lsb = 0x000006dc, + .hal_tcl1_ring_msi1_base_msb = 0x000006e0, + .hal_tcl1_ring_msi1_data = 0x000006e4, + .hal_tcl2_ring_base_lsb = 0x000006ec, + .hal_tcl_ring_base_lsb = 0x0000079c, + + /* TCL STATUS ring address */ + .hal_tcl_status_ring_base_lsb = 0x000008a4, + + /* REO2SW(x) R0 ring configuration address */ + .hal_reo1_ring_base_lsb = 0x000001ec, + .hal_reo1_ring_base_msb = 0x000001f0, + .hal_reo1_ring_id = 0x000001f4, + .hal_reo1_ring_misc = 0x000001fc, + .hal_reo1_ring_hp_addr_lsb = 0x00000200, + .hal_reo1_ring_hp_addr_msb = 0x00000204, + .hal_reo1_ring_producer_int_setup = 0x00000210, + .hal_reo1_ring_msi1_base_lsb = 0x00000234, + .hal_reo1_ring_msi1_base_msb = 0x00000238, + .hal_reo1_ring_msi1_data = 0x0000023c, + .hal_reo2_ring_base_lsb = 0x00000244, + .hal_reo1_aging_thresh_ix_0 = 0x00000564, + .hal_reo1_aging_thresh_ix_1 = 0x00000568, + .hal_reo1_aging_thresh_ix_2 = 0x0000056c, + .hal_reo1_aging_thresh_ix_3 = 0x00000570, + + /* REO2SW(x) R2 ring pointers (head/tail) address */ + .hal_reo1_ring_hp = 0x00003028, + .hal_reo1_ring_tp = 0x0000302c, + .hal_reo2_ring_hp = 0x00003030, + + /* REO2TCL R0 ring configuration address */ + .hal_reo_tcl_ring_base_lsb = 0x000003fc, + .hal_reo_tcl_ring_hp = 0x00003058, + + /* SW2REO ring address */ + .hal_sw2reo_ring_base_lsb = 0x0000013c, + .hal_sw2reo_ring_hp = 0x00003018, + + /* REO CMD ring address */ + .hal_reo_cmd_ring_base_lsb = 0x000000e4, + .hal_reo_cmd_ring_hp = 0x00003010, + + /* REO status address */ + .hal_reo_status_ring_base_lsb = 0x00000504, + .hal_reo_status_hp = 0x00003070, + + /* WCSS relative address */ + .hal_seq_wcss_umac_ce0_src_reg = 0x08400000 + - HAL_IPQ5018_CE_WFSS_REG_BASE, + .hal_seq_wcss_umac_ce0_dst_reg = 0x08401000 + - HAL_IPQ5018_CE_WFSS_REG_BASE, + 
.hal_seq_wcss_umac_ce1_src_reg = 0x08402000 + - HAL_IPQ5018_CE_WFSS_REG_BASE, + .hal_seq_wcss_umac_ce1_dst_reg = 0x08403000 + - HAL_IPQ5018_CE_WFSS_REG_BASE, + + /* WBM Idle address */ + .hal_wbm_idle_link_ring_base_lsb = 0x00000874, + .hal_wbm_idle_link_ring_misc = 0x00000884, + + /* SW2WBM release address */ + .hal_wbm_release_ring_base_lsb = 0x000001ec, + + /* WBM2SW release address */ + .hal_wbm0_release_ring_base_lsb = 0x00000924, + .hal_wbm1_release_ring_base_lsb = 0x0000097c, +}; + const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074 = { .rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM, .tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_ipq8074, diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h index 0c5ef8a526d8..0be4e1232384 100644 --- a/drivers/net/wireless/ath/ath11k/hw.h +++ b/drivers/net/wireless/ath/ath11k/hw.h @@ -80,6 +80,8 @@ #define ATH11K_M3_FILE "m3.bin" #define ATH11K_REGDB_FILE_NAME "regdb.bin" +#define ATH11K_CE_OFFSET(ab) (ab->mem_ce - ab->mem) + enum ath11k_hw_rate_cck { ATH11K_HW_RATE_CCK_LP_11M = 0, ATH11K_HW_RATE_CCK_LP_5_5M, @@ -158,6 +160,8 @@ struct ath11k_hw_params { u32 target_ce_count; const struct service_to_pipe *svc_to_ce_map; u32 svc_to_ce_map_len; + const struct ce_ie_addr *ce_ie_addr; + const struct ce_remap *ce_remap; bool single_pdev_only; @@ -220,6 +224,7 @@ struct ath11k_hw_params { u32 tx_ring_size; bool smp2p_wow_exit; bool support_fw_mac_sequence; + bool ftm_responder; }; struct ath11k_hw_ops { @@ -271,12 +276,18 @@ extern const struct ath11k_hw_ops qca6390_ops; extern const struct ath11k_hw_ops qcn9074_ops; extern const struct ath11k_hw_ops wcn6855_ops; extern const struct ath11k_hw_ops wcn6750_ops; +extern const struct ath11k_hw_ops ipq5018_ops; extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074; extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390; extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qcn9074; extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_wcn6750; +extern const struct ce_ie_addr ath11k_ce_ie_addr_ipq8074; +extern const struct ce_ie_addr ath11k_ce_ie_addr_ipq5018; + +extern const struct ce_remap ath11k_ce_remap_ipq5018; + extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074; extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_qca6390; extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_wcn6750; @@ -406,6 +417,7 @@ extern const struct ath11k_hw_regs qca6390_regs; extern const struct ath11k_hw_regs qcn9074_regs; extern const struct ath11k_hw_regs wcn6855_regs; extern const struct ath11k_hw_regs wcn6750_regs; +extern const struct ath11k_hw_regs ipq5018_regs; static inline const char *ath11k_bd_ie_type_str(enum ath11k_bd_ie_type type) { diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 9e923ecb0891..110a38cce0a7 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -96,6 +96,7 @@ static const struct ieee80211_channel ath11k_5ghz_channels[] = { CHAN5G(165, 5825, 0), CHAN5G(169, 5845, 0), CHAN5G(173, 5865, 0), + CHAN5G(177, 5885, 0), }; static const struct ieee80211_channel ath11k_6ghz_channels[] = { @@ -3110,7 +3111,7 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw, u16 bitrate; int ret = 0; u8 rateidx; - u32 rate; + u32 rate, param; u32 ipv4_cnt; mutex_lock(&ar->conf_mutex); @@ -3412,6 +3413,20 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw, } } + if (changed & 
BSS_CHANGED_FTM_RESPONDER && + arvif->ftm_responder != info->ftm_responder && + ar->ab->hw_params.ftm_responder && + (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_MESH_POINT)) { + arvif->ftm_responder = info->ftm_responder; + param = WMI_VDEV_PARAM_ENABLE_DISABLE_RTT_RESPONDER_ROLE; + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, + arvif->ftm_responder); + if (ret) + ath11k_warn(ar->ab, "Failed to set ftm responder %i: %d\n", + arvif->vdev_id, ret); + } + if (changed & BSS_CHANGED_FILS_DISCOVERY || changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP) ath11k_mac_fils_discovery(arvif, info); @@ -3612,7 +3627,7 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw, struct ath11k *ar = hw->priv; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct cfg80211_scan_request *req = &hw_req->req; - struct scan_req_params arg; + struct scan_req_params *arg = NULL; int ret = 0; int i; u32 scan_timeout; @@ -3640,72 +3655,78 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw, if (ret) goto exit; - memset(&arg, 0, sizeof(arg)); - ath11k_wmi_start_scan_init(ar, &arg); - arg.vdev_id = arvif->vdev_id; - arg.scan_id = ATH11K_SCAN_ID; + arg = kzalloc(sizeof(*arg), GFP_KERNEL); + + if (!arg) { + ret = -ENOMEM; + goto exit; + } + + ath11k_wmi_start_scan_init(ar, arg); + arg->vdev_id = arvif->vdev_id; + arg->scan_id = ATH11K_SCAN_ID; if (req->ie_len) { - arg.extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL); - if (!arg.extraie.ptr) { + arg->extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL); + if (!arg->extraie.ptr) { ret = -ENOMEM; goto exit; } - arg.extraie.len = req->ie_len; + arg->extraie.len = req->ie_len; } if (req->n_ssids) { - arg.num_ssids = req->n_ssids; - for (i = 0; i < arg.num_ssids; i++) { - arg.ssid[i].length = req->ssids[i].ssid_len; - memcpy(&arg.ssid[i].ssid, req->ssids[i].ssid, + arg->num_ssids = req->n_ssids; + for (i = 0; i < arg->num_ssids; i++) { + arg->ssid[i].length = req->ssids[i].ssid_len; + memcpy(&arg->ssid[i].ssid, req->ssids[i].ssid, req->ssids[i].ssid_len); } } else { - arg.scan_flags |= WMI_SCAN_FLAG_PASSIVE; + arg->scan_flags |= WMI_SCAN_FLAG_PASSIVE; } if (req->n_channels) { - arg.num_chan = req->n_channels; - arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list), - GFP_KERNEL); + arg->num_chan = req->n_channels; + arg->chan_list = kcalloc(arg->num_chan, sizeof(*arg->chan_list), + GFP_KERNEL); - if (!arg.chan_list) { + if (!arg->chan_list) { ret = -ENOMEM; goto exit; } - for (i = 0; i < arg.num_chan; i++) - arg.chan_list[i] = req->channels[i]->center_freq; + for (i = 0; i < arg->num_chan; i++) + arg->chan_list[i] = req->channels[i]->center_freq; } if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { - arg.scan_f_add_spoofed_mac_in_probe = 1; - ether_addr_copy(arg.mac_addr.addr, req->mac_addr); - ether_addr_copy(arg.mac_mask.addr, req->mac_addr_mask); + arg->scan_f_add_spoofed_mac_in_probe = 1; + ether_addr_copy(arg->mac_addr.addr, req->mac_addr); + ether_addr_copy(arg->mac_mask.addr, req->mac_addr_mask); } /* if duration is set, default dwell times will be overwritten */ if (req->duration) { - arg.dwell_time_active = req->duration; - arg.dwell_time_active_2g = req->duration; - arg.dwell_time_active_6g = req->duration; - arg.dwell_time_passive = req->duration; - arg.dwell_time_passive_6g = req->duration; - arg.burst_duration = req->duration; - - scan_timeout = min_t(u32, arg.max_rest_time * - (arg.num_chan - 1) + (req->duration + + arg->dwell_time_active = req->duration; + arg->dwell_time_active_2g = req->duration; 
+ arg->dwell_time_active_6g = req->duration; + arg->dwell_time_passive = req->duration; + arg->dwell_time_passive_6g = req->duration; + arg->burst_duration = req->duration; + + scan_timeout = min_t(u32, arg->max_rest_time * + (arg->num_chan - 1) + (req->duration + ATH11K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD) * - arg.num_chan, arg.max_scan_time); + arg->num_chan, arg->max_scan_time); } else { - scan_timeout = arg.max_scan_time; + scan_timeout = arg->max_scan_time; } /* Add a margin to account for event/command processing */ scan_timeout += ATH11K_MAC_SCAN_CMD_EVT_OVERHEAD; - ret = ath11k_start_scan(ar, &arg); + ret = ath11k_start_scan(ar, arg); if (ret) { ath11k_warn(ar->ab, "failed to start hw scan: %d\n", ret); spin_lock_bh(&ar->data_lock); @@ -3717,10 +3738,11 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw, msecs_to_jiffies(scan_timeout)); exit: - kfree(arg.chan_list); - - if (req->ie_len) - kfree(arg.extraie.ptr); + if (arg) { + kfree(arg->chan_list); + kfree(arg->extraie.ptr); + kfree(arg); + } mutex_unlock(&ar->conf_mutex); @@ -9106,6 +9128,10 @@ static int __ath11k_mac_register(struct ath11k *ar) wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL); + if (ab->hw_params.ftm_responder) + wiphy_ext_feature_set(ar->hw->wiphy, + NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER); + ath11k_reg_init(ar); if (!test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) { diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c index 99cf3357c66e..776362d151cb 100644 --- a/drivers/net/wireless/ath/ath11k/pci.c +++ b/drivers/net/wireless/ath/ath11k/pci.c @@ -543,6 +543,8 @@ static int ath11k_pci_claim(struct ath11k_pci *ab_pci, struct pci_dev *pdev) goto clear_master; } + ab->mem_ce = ab->mem; + ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot pci_mem 0x%pK\n", ab->mem); return 0; diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h index 8f2c07d70a4a..0a045af5419b 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.h +++ b/drivers/net/wireless/ath/ath11k/wmi.h @@ -1073,6 +1073,7 @@ enum wmi_tlv_vdev_param { WMI_VDEV_PARAM_ENABLE_BCAST_PROBE_RESPONSE, WMI_VDEV_PARAM_FILS_MAX_CHANNEL_GUARD_TIME, WMI_VDEV_PARAM_HE_LTF = 0x74, + WMI_VDEV_PARAM_ENABLE_DISABLE_RTT_RESPONDER_ROLE = 0x7d, WMI_VDEV_PARAM_BA_MODE = 0x7e, WMI_VDEV_PARAM_AUTORATE_MISC_CFG = 0x80, WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE = 0x87, diff --git a/drivers/net/wireless/ath/ath12k/Kconfig b/drivers/net/wireless/ath/ath12k/Kconfig new file mode 100644 index 000000000000..4f9c514c13e7 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/Kconfig @@ -0,0 +1,34 @@ +# SPDX-License-Identifier: BSD-3-Clause-Clear +config ATH12K + tristate "Qualcomm Technologies Wi-Fi 7 support (ath12k)" + depends on MAC80211 && HAS_DMA && PCI + depends on CRYPTO_MICHAEL_MIC + select QCOM_QMI_HELPERS + select MHI_BUS + select QRTR + select QRTR_MHI + help + Enable support for Qualcomm Technologies Wi-Fi 7 (IEEE + 802.11be) family of chipsets, for example WCN7850 and + QCN9274. + + If you choose to build a module, it'll be called ath12k. + +config ATH12K_DEBUG + bool "ath12k debugging" + depends on ATH12K + help + Enable debug support, for example debug messages which must + be enabled separately using the debug_mask module parameter. + + If unsure, say Y to make it easier to debug problems. But if + you want optimal performance choose N. + +config ATH12K_TRACING + bool "ath12k tracing support" + depends on ATH12K && EVENT_TRACING + help + Enable ath12k tracing infrastructure. 
+ + If unsure, say Y to make it easier to debug problems. But if + you want optimal performance choose N. diff --git a/drivers/net/wireless/ath/ath12k/Makefile b/drivers/net/wireless/ath/ath12k/Makefile new file mode 100644 index 000000000000..62c52e733b5e --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/Makefile @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: BSD-3-Clause-Clear +obj-$(CONFIG_ATH12K) += ath12k.o +ath12k-y += core.o \ + hal.o \ + hal_tx.o \ + hal_rx.o \ + wmi.o \ + mac.o \ + reg.o \ + htc.o \ + qmi.o \ + dp.o \ + dp_tx.o \ + dp_rx.o \ + debug.o \ + ce.o \ + peer.o \ + dbring.o \ + hw.o \ + mhi.o \ + pci.o \ + dp_mon.o + +ath12k-$(CONFIG_ATH12K_TRACING) += trace.o + +# for tracing framework to find trace.h +CFLAGS_trace.o := -I$(src) diff --git a/drivers/net/wireless/ath/ath12k/ce.c b/drivers/net/wireless/ath/ath12k/ce.c new file mode 100644 index 000000000000..aed6987804bf --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/ce.c @@ -0,0 +1,964 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include "dp_rx.h" +#include "debug.h" +#include "hif.h" + +const struct ce_attr ath12k_host_ce_config_qcn9274[] = { + /* CE0: host->target HTC control and raw streams */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 16, + .src_sz_max = 2048, + .dest_nentries = 0, + }, + + /* CE1: target->host HTT + HTC control */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 512, + .recv_cb = ath12k_htc_rx_completion_handler, + }, + + /* CE2: target->host WMI */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 128, + .recv_cb = ath12k_htc_rx_completion_handler, + }, + + /* CE3: host->target WMI (mac0) */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 32, + .src_sz_max = 2048, + .dest_nentries = 0, + }, + + /* CE4: host->target HTT */ + { + .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, + .src_nentries = 2048, + .src_sz_max = 256, + .dest_nentries = 0, + }, + + /* CE5: target->host pktlog */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 512, + .recv_cb = ath12k_dp_htt_htc_t2h_msg_handler, + }, + + /* CE6: target autonomous hif_memcpy */ + { + .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, + .src_nentries = 0, + .src_sz_max = 0, + .dest_nentries = 0, + }, + + /* CE7: host->target WMI (mac1) */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 32, + .src_sz_max = 2048, + .dest_nentries = 0, + }, + + /* CE8: target autonomous hif_memcpy */ + { + .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, + .src_nentries = 0, + .src_sz_max = 0, + .dest_nentries = 0, + }, + + /* CE9: MHI */ + { + .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, + .src_nentries = 0, + .src_sz_max = 0, + .dest_nentries = 0, + }, + + /* CE10: MHI */ + { + .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, + .src_nentries = 0, + .src_sz_max = 0, + .dest_nentries = 0, + }, + + /* CE11: MHI */ + { + .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, + .src_nentries = 0, + .src_sz_max = 0, + .dest_nentries = 0, + }, + + /* CE12: CV Prefetch */ + { + .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, + .src_nentries = 0, + .src_sz_max = 0, + .dest_nentries = 0, + }, + + /* CE13: CV Prefetch */ + { + .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, + .src_nentries = 0, + .src_sz_max = 0, + .dest_nentries = 0, + }, + + /* CE14: target->host dbg log */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries 
= 0, + .src_sz_max = 2048, + .dest_nentries = 512, + .recv_cb = ath12k_htc_rx_completion_handler, + }, + + /* CE15: reserved for future use */ + { + .flags = (CE_ATTR_FLAGS | CE_ATTR_DIS_INTR), + .src_nentries = 0, + .src_sz_max = 0, + .dest_nentries = 0, + }, +}; + +const struct ce_attr ath12k_host_ce_config_wcn7850[] = { + /* CE0: host->target HTC control and raw streams */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 16, + .src_sz_max = 2048, + .dest_nentries = 0, + }, + + /* CE1: target->host HTT + HTC control */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 512, + .recv_cb = ath12k_htc_rx_completion_handler, + }, + + /* CE2: target->host WMI */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 64, + .recv_cb = ath12k_htc_rx_completion_handler, + }, + + /* CE3: host->target WMI (mac0) */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 32, + .src_sz_max = 2048, + .dest_nentries = 0, + }, + + /* CE4: host->target HTT */ + { + .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, + .src_nentries = 2048, + .src_sz_max = 256, + .dest_nentries = 0, + }, + + /* CE5: target->host pktlog */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 0, + .dest_nentries = 0, + }, + + /* CE6: target autonomous hif_memcpy */ + { + .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, + .src_nentries = 0, + .src_sz_max = 0, + .dest_nentries = 0, + }, + + /* CE7: host->target WMI (mac1) */ + { + .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 0, + }, + + /* CE8: target autonomous hif_memcpy */ + { + .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, + .src_nentries = 0, + .src_sz_max = 0, + .dest_nentries = 0, + }, + +}; + +static int ath12k_ce_rx_buf_enqueue_pipe(struct ath12k_ce_pipe *pipe, + struct sk_buff *skb, dma_addr_t paddr) +{ + struct ath12k_base *ab = pipe->ab; + struct ath12k_ce_ring *ring = pipe->dest_ring; + struct hal_srng *srng; + unsigned int write_index; + unsigned int nentries_mask = ring->nentries_mask; + struct hal_ce_srng_dest_desc *desc; + int ret; + + lockdep_assert_held(&ab->ce.ce_lock); + + write_index = ring->write_index; + + srng = &ab->hal.srng_list[ring->hal_ring_id]; + + spin_lock_bh(&srng->lock); + + ath12k_hal_srng_access_begin(ab, srng); + + if (unlikely(ath12k_hal_srng_src_num_free(ab, srng, false) < 1)) { + ret = -ENOSPC; + goto exit; + } + + desc = ath12k_hal_srng_src_get_next_entry(ab, srng); + if (!desc) { + ret = -ENOSPC; + goto exit; + } + + ath12k_hal_ce_dst_set_desc(desc, paddr); + + ring->skb[write_index] = skb; + write_index = CE_RING_IDX_INCR(nentries_mask, write_index); + ring->write_index = write_index; + + pipe->rx_buf_needed--; + + ret = 0; +exit: + ath12k_hal_srng_access_end(ab, srng); + + spin_unlock_bh(&srng->lock); + + return ret; +} + +static int ath12k_ce_rx_post_pipe(struct ath12k_ce_pipe *pipe) +{ + struct ath12k_base *ab = pipe->ab; + struct sk_buff *skb; + dma_addr_t paddr; + int ret = 0; + + if (!(pipe->dest_ring || pipe->status_ring)) + return 0; + + spin_lock_bh(&ab->ce.ce_lock); + while (pipe->rx_buf_needed) { + skb = dev_alloc_skb(pipe->buf_sz); + if (!skb) { + ret = -ENOMEM; + goto exit; + } + + WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4)); + + paddr = dma_map_single(ab->dev, skb->data, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(ab->dev, paddr))) { + ath12k_warn(ab, "failed to dma map ce rx buf\n"); + dev_kfree_skb_any(skb); + ret = -EIO; + goto exit; + } + + 
ATH12K_SKB_RXCB(skb)->paddr = paddr; + + ret = ath12k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr); + if (ret) { + ath12k_warn(ab, "failed to enqueue rx buf: %d\n", ret); + dma_unmap_single(ab->dev, paddr, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + goto exit; + } + } + +exit: + spin_unlock_bh(&ab->ce.ce_lock); + return ret; +} + +static int ath12k_ce_completed_recv_next(struct ath12k_ce_pipe *pipe, + struct sk_buff **skb, int *nbytes) +{ + struct ath12k_base *ab = pipe->ab; + struct hal_ce_srng_dst_status_desc *desc; + struct hal_srng *srng; + unsigned int sw_index; + unsigned int nentries_mask; + int ret = 0; + + spin_lock_bh(&ab->ce.ce_lock); + + sw_index = pipe->dest_ring->sw_index; + nentries_mask = pipe->dest_ring->nentries_mask; + + srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id]; + + spin_lock_bh(&srng->lock); + + ath12k_hal_srng_access_begin(ab, srng); + + desc = ath12k_hal_srng_dst_get_next_entry(ab, srng); + if (!desc) { + ret = -EIO; + goto err; + } + + *nbytes = ath12k_hal_ce_dst_status_get_length(desc); + if (*nbytes == 0) { + ret = -EIO; + goto err; + } + + *skb = pipe->dest_ring->skb[sw_index]; + pipe->dest_ring->skb[sw_index] = NULL; + + sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); + pipe->dest_ring->sw_index = sw_index; + + pipe->rx_buf_needed++; +err: + ath12k_hal_srng_access_end(ab, srng); + + spin_unlock_bh(&srng->lock); + + spin_unlock_bh(&ab->ce.ce_lock); + + return ret; +} + +static void ath12k_ce_recv_process_cb(struct ath12k_ce_pipe *pipe) +{ + struct ath12k_base *ab = pipe->ab; + struct sk_buff *skb; + struct sk_buff_head list; + unsigned int nbytes, max_nbytes; + int ret; + + __skb_queue_head_init(&list); + while (ath12k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) { + max_nbytes = skb->len + skb_tailroom(skb); + dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr, + max_nbytes, DMA_FROM_DEVICE); + + if (unlikely(max_nbytes < nbytes)) { + ath12k_warn(ab, "rxed more than expected (nbytes %d, max %d)", + nbytes, max_nbytes); + dev_kfree_skb_any(skb); + continue; + } + + skb_put(skb, nbytes); + __skb_queue_tail(&list, skb); + } + + while ((skb = __skb_dequeue(&list))) { + ath12k_dbg(ab, ATH12K_DBG_AHB, "rx ce pipe %d len %d\n", + pipe->pipe_num, skb->len); + pipe->recv_cb(ab, skb); + } + + ret = ath12k_ce_rx_post_pipe(pipe); + if (ret && ret != -ENOSPC) { + ath12k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n", + pipe->pipe_num, ret); + mod_timer(&ab->rx_replenish_retry, + jiffies + ATH12K_CE_RX_POST_RETRY_JIFFIES); + } +} + +static struct sk_buff *ath12k_ce_completed_send_next(struct ath12k_ce_pipe *pipe) +{ + struct ath12k_base *ab = pipe->ab; + struct hal_ce_srng_src_desc *desc; + struct hal_srng *srng; + unsigned int sw_index; + unsigned int nentries_mask; + struct sk_buff *skb; + + spin_lock_bh(&ab->ce.ce_lock); + + sw_index = pipe->src_ring->sw_index; + nentries_mask = pipe->src_ring->nentries_mask; + + srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id]; + + spin_lock_bh(&srng->lock); + + ath12k_hal_srng_access_begin(ab, srng); + + desc = ath12k_hal_srng_src_reap_next(ab, srng); + if (!desc) { + skb = ERR_PTR(-EIO); + goto err_unlock; + } + + skb = pipe->src_ring->skb[sw_index]; + + pipe->src_ring->skb[sw_index] = NULL; + + sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); + pipe->src_ring->sw_index = sw_index; + +err_unlock: + spin_unlock_bh(&srng->lock); + + spin_unlock_bh(&ab->ce.ce_lock); + + return skb; +} + +static void ath12k_ce_send_done_cb(struct ath12k_ce_pipe *pipe) +{ + 
struct ath12k_base *ab = pipe->ab; + struct sk_buff *skb; + + while (!IS_ERR(skb = ath12k_ce_completed_send_next(pipe))) { + if (!skb) + continue; + + dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr, skb->len, + DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + } +} + +static void ath12k_ce_srng_msi_ring_params_setup(struct ath12k_base *ab, u32 ce_id, + struct hal_srng_params *ring_params) +{ + u32 msi_data_start; + u32 msi_data_count, msi_data_idx; + u32 msi_irq_start; + u32 addr_lo; + u32 addr_hi; + int ret; + + ret = ath12k_hif_get_user_msi_vector(ab, "CE", + &msi_data_count, &msi_data_start, + &msi_irq_start); + + if (ret) + return; + + ath12k_hif_get_msi_address(ab, &addr_lo, &addr_hi); + ath12k_hif_get_ce_msi_idx(ab, ce_id, &msi_data_idx); + + ring_params->msi_addr = addr_lo; + ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32); + ring_params->msi_data = (msi_data_idx % msi_data_count) + msi_data_start; + ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR; +} + +static int ath12k_ce_init_ring(struct ath12k_base *ab, + struct ath12k_ce_ring *ce_ring, + int ce_id, enum hal_ring_type type) +{ + struct hal_srng_params params = { 0 }; + int ret; + + params.ring_base_paddr = ce_ring->base_addr_ce_space; + params.ring_base_vaddr = ce_ring->base_addr_owner_space; + params.num_entries = ce_ring->nentries; + + if (!(CE_ATTR_DIS_INTR & ab->hw_params->host_ce_config[ce_id].flags)) + ath12k_ce_srng_msi_ring_params_setup(ab, ce_id, ¶ms); + + switch (type) { + case HAL_CE_SRC: + if (!(CE_ATTR_DIS_INTR & ab->hw_params->host_ce_config[ce_id].flags)) + params.intr_batch_cntr_thres_entries = 1; + break; + case HAL_CE_DST: + params.max_buffer_len = ab->hw_params->host_ce_config[ce_id].src_sz_max; + if (!(ab->hw_params->host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) { + params.intr_timer_thres_us = 1024; + params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN; + params.low_threshold = ce_ring->nentries - 3; + } + break; + case HAL_CE_DST_STATUS: + if (!(ab->hw_params->host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) { + params.intr_batch_cntr_thres_entries = 1; + params.intr_timer_thres_us = 0x1000; + } + break; + default: + ath12k_warn(ab, "Invalid CE ring type %d\n", type); + return -EINVAL; + } + + /* TODO: Init other params needed by HAL to init the ring */ + + ret = ath12k_hal_srng_setup(ab, type, ce_id, 0, ¶ms); + if (ret < 0) { + ath12k_warn(ab, "failed to setup srng: %d ring_id %d\n", + ret, ce_id); + return ret; + } + + ce_ring->hal_ring_id = ret; + + return 0; +} + +static struct ath12k_ce_ring * +ath12k_ce_alloc_ring(struct ath12k_base *ab, int nentries, int desc_sz) +{ + struct ath12k_ce_ring *ce_ring; + dma_addr_t base_addr; + + ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL); + if (!ce_ring) + return ERR_PTR(-ENOMEM); + + ce_ring->nentries = nentries; + ce_ring->nentries_mask = nentries - 1; + + /* Legacy platforms that do not support cache + * coherent DMA are unsupported + */ + ce_ring->base_addr_owner_space_unaligned = + dma_alloc_coherent(ab->dev, + nentries * desc_sz + CE_DESC_RING_ALIGN, + &base_addr, GFP_KERNEL); + if (!ce_ring->base_addr_owner_space_unaligned) { + kfree(ce_ring); + return ERR_PTR(-ENOMEM); + } + + ce_ring->base_addr_ce_space_unaligned = base_addr; + + ce_ring->base_addr_owner_space = + PTR_ALIGN(ce_ring->base_addr_owner_space_unaligned, + CE_DESC_RING_ALIGN); + + ce_ring->base_addr_ce_space = ALIGN(ce_ring->base_addr_ce_space_unaligned, + CE_DESC_RING_ALIGN); + + return ce_ring; +} + +static int ath12k_ce_alloc_pipe(struct ath12k_base *ab, 
int ce_id) +{ + struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id]; + const struct ce_attr *attr = &ab->hw_params->host_ce_config[ce_id]; + struct ath12k_ce_ring *ring; + int nentries; + int desc_sz; + + pipe->attr_flags = attr->flags; + + if (attr->src_nentries) { + pipe->send_cb = ath12k_ce_send_done_cb; + nentries = roundup_pow_of_two(attr->src_nentries); + desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_SRC); + ring = ath12k_ce_alloc_ring(ab, nentries, desc_sz); + if (IS_ERR(ring)) + return PTR_ERR(ring); + pipe->src_ring = ring; + } + + if (attr->dest_nentries) { + pipe->recv_cb = attr->recv_cb; + nentries = roundup_pow_of_two(attr->dest_nentries); + desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST); + ring = ath12k_ce_alloc_ring(ab, nentries, desc_sz); + if (IS_ERR(ring)) + return PTR_ERR(ring); + pipe->dest_ring = ring; + + desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS); + ring = ath12k_ce_alloc_ring(ab, nentries, desc_sz); + if (IS_ERR(ring)) + return PTR_ERR(ring); + pipe->status_ring = ring; + } + + return 0; +} + +void ath12k_ce_per_engine_service(struct ath12k_base *ab, u16 ce_id) +{ + struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id]; + + if (pipe->send_cb) + pipe->send_cb(pipe); + + if (pipe->recv_cb) + ath12k_ce_recv_process_cb(pipe); +} + +void ath12k_ce_poll_send_completed(struct ath12k_base *ab, u8 pipe_id) +{ + struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id]; + + if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && pipe->send_cb) + pipe->send_cb(pipe); +} + +int ath12k_ce_send(struct ath12k_base *ab, struct sk_buff *skb, u8 pipe_id, + u16 transfer_id) +{ + struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id]; + struct hal_ce_srng_src_desc *desc; + struct hal_srng *srng; + unsigned int write_index, sw_index; + unsigned int nentries_mask; + int ret = 0; + u8 byte_swap_data = 0; + int num_used; + + /* Check if some entries could be regained by handling tx completion if + * the CE has interrupts disabled and the used entries is more than the + * defined usage threshold. 
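+	 *
+	 * For example, with a 32-entry ring, sw_index = 30 and
+	 * write_index = 5, the ring has wrapped and
+	 * num_used = 32 - 30 + 5 = 7; once num_used exceeds
+	 * ATH12K_CE_USAGE_THRESHOLD, completions are reaped inline via
+	 * ath12k_ce_poll_send_completed().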
+ */ + if (pipe->attr_flags & CE_ATTR_DIS_INTR) { + spin_lock_bh(&ab->ce.ce_lock); + write_index = pipe->src_ring->write_index; + + sw_index = pipe->src_ring->sw_index; + + if (write_index >= sw_index) + num_used = write_index - sw_index; + else + num_used = pipe->src_ring->nentries - sw_index + + write_index; + + spin_unlock_bh(&ab->ce.ce_lock); + + if (num_used > ATH12K_CE_USAGE_THRESHOLD) + ath12k_ce_poll_send_completed(ab, pipe->pipe_num); + } + + if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags)) + return -ESHUTDOWN; + + spin_lock_bh(&ab->ce.ce_lock); + + write_index = pipe->src_ring->write_index; + nentries_mask = pipe->src_ring->nentries_mask; + + srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id]; + + spin_lock_bh(&srng->lock); + + ath12k_hal_srng_access_begin(ab, srng); + + if (unlikely(ath12k_hal_srng_src_num_free(ab, srng, false) < 1)) { + ath12k_hal_srng_access_end(ab, srng); + ret = -ENOBUFS; + goto unlock; + } + + desc = ath12k_hal_srng_src_get_next_reaped(ab, srng); + if (!desc) { + ath12k_hal_srng_access_end(ab, srng); + ret = -ENOBUFS; + goto unlock; + } + + if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA) + byte_swap_data = 1; + + ath12k_hal_ce_src_set_desc(desc, ATH12K_SKB_CB(skb)->paddr, + skb->len, transfer_id, byte_swap_data); + + pipe->src_ring->skb[write_index] = skb; + pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask, + write_index); + + ath12k_hal_srng_access_end(ab, srng); + +unlock: + spin_unlock_bh(&srng->lock); + + spin_unlock_bh(&ab->ce.ce_lock); + + return ret; +} + +static void ath12k_ce_rx_pipe_cleanup(struct ath12k_ce_pipe *pipe) +{ + struct ath12k_base *ab = pipe->ab; + struct ath12k_ce_ring *ring = pipe->dest_ring; + struct sk_buff *skb; + int i; + + if (!(ring && pipe->buf_sz)) + return; + + for (i = 0; i < ring->nentries; i++) { + skb = ring->skb[i]; + if (!skb) + continue; + + ring->skb[i] = NULL; + dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr, + skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + } +} + +void ath12k_ce_cleanup_pipes(struct ath12k_base *ab) +{ + struct ath12k_ce_pipe *pipe; + int pipe_num; + + for (pipe_num = 0; pipe_num < ab->hw_params->ce_count; pipe_num++) { + pipe = &ab->ce.ce_pipe[pipe_num]; + ath12k_ce_rx_pipe_cleanup(pipe); + + /* Cleanup any src CE's which have interrupts disabled */ + ath12k_ce_poll_send_completed(ab, pipe_num); + + /* NOTE: Should we also clean up tx buffer in all pipes? 
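+		 * ath12k_ce_poll_send_completed() above only reaps source
+		 * rings flagged CE_ATTR_DIS_INTR; interrupt-driven pipes are
+		 * expected to have had their completions handled by the IRQ
+		 * path before cleanup runs.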
*/ + } +} + +void ath12k_ce_rx_post_buf(struct ath12k_base *ab) +{ + struct ath12k_ce_pipe *pipe; + int i; + int ret; + + for (i = 0; i < ab->hw_params->ce_count; i++) { + pipe = &ab->ce.ce_pipe[i]; + ret = ath12k_ce_rx_post_pipe(pipe); + if (ret) { + if (ret == -ENOSPC) + continue; + + ath12k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n", + i, ret); + mod_timer(&ab->rx_replenish_retry, + jiffies + ATH12K_CE_RX_POST_RETRY_JIFFIES); + + return; + } + } +} + +void ath12k_ce_rx_replenish_retry(struct timer_list *t) +{ + struct ath12k_base *ab = from_timer(ab, t, rx_replenish_retry); + + ath12k_ce_rx_post_buf(ab); +} + +static void ath12k_ce_shadow_config(struct ath12k_base *ab) +{ + int i; + + for (i = 0; i < ab->hw_params->ce_count; i++) { + if (ab->hw_params->host_ce_config[i].src_nentries) + ath12k_hal_srng_update_shadow_config(ab, HAL_CE_SRC, i); + + if (ab->hw_params->host_ce_config[i].dest_nentries) { + ath12k_hal_srng_update_shadow_config(ab, HAL_CE_DST, i); + ath12k_hal_srng_update_shadow_config(ab, HAL_CE_DST_STATUS, i); + } + } +} + +void ath12k_ce_get_shadow_config(struct ath12k_base *ab, + u32 **shadow_cfg, u32 *shadow_cfg_len) +{ + if (!ab->hw_params->supports_shadow_regs) + return; + + ath12k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len); + + /* shadow is already configured */ + if (*shadow_cfg_len) + return; + + /* shadow isn't configured yet, configure now. + * non-CE srngs are configured firstly, then + * all CE srngs. + */ + ath12k_hal_srng_shadow_config(ab); + ath12k_ce_shadow_config(ab); + + /* get the shadow configuration */ + ath12k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len); +} + +int ath12k_ce_init_pipes(struct ath12k_base *ab) +{ + struct ath12k_ce_pipe *pipe; + int i; + int ret; + + ath12k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v3, + &ab->qmi.ce_cfg.shadow_reg_v3_len); + + for (i = 0; i < ab->hw_params->ce_count; i++) { + pipe = &ab->ce.ce_pipe[i]; + + if (pipe->src_ring) { + ret = ath12k_ce_init_ring(ab, pipe->src_ring, i, + HAL_CE_SRC); + if (ret) { + ath12k_warn(ab, "failed to init src ring: %d\n", + ret); + /* Should we clear any partial init */ + return ret; + } + + pipe->src_ring->write_index = 0; + pipe->src_ring->sw_index = 0; + } + + if (pipe->dest_ring) { + ret = ath12k_ce_init_ring(ab, pipe->dest_ring, i, + HAL_CE_DST); + if (ret) { + ath12k_warn(ab, "failed to init dest ring: %d\n", + ret); + /* Should we clear any partial init */ + return ret; + } + + pipe->rx_buf_needed = pipe->dest_ring->nentries ? 
+					  pipe->dest_ring->nentries - 2 : 0;
+
+			pipe->dest_ring->write_index = 0;
+			pipe->dest_ring->sw_index = 0;
+		}
+
+		if (pipe->status_ring) {
+			ret = ath12k_ce_init_ring(ab, pipe->status_ring, i,
+						  HAL_CE_DST_STATUS);
+			if (ret) {
+				ath12k_warn(ab, "failed to init dest status ring: %d\n",
+					    ret);
+				/* Should we clear any partial init */
+				return ret;
+			}
+
+			pipe->status_ring->write_index = 0;
+			pipe->status_ring->sw_index = 0;
+		}
+	}
+
+	return 0;
+}
+
+void ath12k_ce_free_pipes(struct ath12k_base *ab)
+{
+	struct ath12k_ce_pipe *pipe;
+	int desc_sz;
+	int i;
+
+	for (i = 0; i < ab->hw_params->ce_count; i++) {
+		pipe = &ab->ce.ce_pipe[i];
+
+		if (pipe->src_ring) {
+			desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
+			dma_free_coherent(ab->dev,
+					  pipe->src_ring->nentries * desc_sz +
+					  CE_DESC_RING_ALIGN,
+					  pipe->src_ring->base_addr_owner_space,
+					  pipe->src_ring->base_addr_ce_space);
+			kfree(pipe->src_ring);
+			pipe->src_ring = NULL;
+		}
+
+		if (pipe->dest_ring) {
+			desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
+			dma_free_coherent(ab->dev,
+					  pipe->dest_ring->nentries * desc_sz +
+					  CE_DESC_RING_ALIGN,
+					  pipe->dest_ring->base_addr_owner_space,
+					  pipe->dest_ring->base_addr_ce_space);
+			kfree(pipe->dest_ring);
+			pipe->dest_ring = NULL;
+		}
+
+		if (pipe->status_ring) {
+			desc_sz =
+			  ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
+			dma_free_coherent(ab->dev,
+					  pipe->status_ring->nentries * desc_sz +
+					  CE_DESC_RING_ALIGN,
+					  pipe->status_ring->base_addr_owner_space,
+					  pipe->status_ring->base_addr_ce_space);
+			kfree(pipe->status_ring);
+			pipe->status_ring = NULL;
+		}
+	}
+}
+
+int ath12k_ce_alloc_pipes(struct ath12k_base *ab)
+{
+	struct ath12k_ce_pipe *pipe;
+	int i;
+	int ret;
+	const struct ce_attr *attr;
+
+	spin_lock_init(&ab->ce.ce_lock);
+
+	for (i = 0; i < ab->hw_params->ce_count; i++) {
+		attr = &ab->hw_params->host_ce_config[i];
+		pipe = &ab->ce.ce_pipe[i];
+		pipe->pipe_num = i;
+		pipe->ab = ab;
+		pipe->buf_sz = attr->src_sz_max;
+
+		ret = ath12k_ce_alloc_pipe(ab, i);
+		if (ret) {
+			/* Free any partially successful allocation */
+			ath12k_ce_free_pipes(ab);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+int ath12k_ce_get_attr_flags(struct ath12k_base *ab, int ce_id)
+{
+	if (ce_id >= ab->hw_params->ce_count)
+		return -EINVAL;
+
+	return ab->hw_params->host_ce_config[ce_id].flags;
+}
diff --git a/drivers/net/wireless/ath/ath12k/ce.h b/drivers/net/wireless/ath/ath12k/ce.h
new file mode 100644
index 000000000000..17cf16235e0b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/ce.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_CE_H
+#define ATH12K_CE_H
+
+#define CE_COUNT_MAX 16
+
+/* Byte swap data words */
+#define CE_ATTR_BYTE_SWAP_DATA 2
+
+/* no interrupt on copy completion */
+#define CE_ATTR_DIS_INTR 8
+
+/* Host software's Copy Engine configuration. */
+#define CE_ATTR_FLAGS 0
+
+/* Threshold to poll for tx completion in case of interrupt-disabled CEs */
+#define ATH12K_CE_USAGE_THRESHOLD 32
+
+/* Directions for interconnect pipe configuration.
+ * These definitions may be used during configuration and are shared
+ * between Host and Target.
+ *
+ * Pipe Directions are relative to the Host, so PIPEDIR_IN means
+ * "coming IN over air through Target to Host" as with a WiFi Rx operation.
+ * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air" + * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man" + * Target since things that are "PIPEDIR_OUT" are coming IN to the Target + * over the interconnect. + */ +#define PIPEDIR_NONE 0 +#define PIPEDIR_IN 1 /* Target-->Host, WiFi Rx direction */ +#define PIPEDIR_OUT 2 /* Host->Target, WiFi Tx direction */ +#define PIPEDIR_INOUT 3 /* bidirectional */ +#define PIPEDIR_INOUT_H2H 4 /* bidirectional, host to host */ + +/* CE address/mask */ +#define CE_HOST_IE_ADDRESS 0x00A1803C +#define CE_HOST_IE_2_ADDRESS 0x00A18040 +#define CE_HOST_IE_3_ADDRESS CE_HOST_IE_ADDRESS + +#define CE_HOST_IE_3_SHIFT 0xC + +#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask)) + +#define ATH12K_CE_RX_POST_RETRY_JIFFIES 50 + +struct ath12k_base; + +/* Establish a mapping between a service/direction and a pipe. + * Configuration information for a Copy Engine pipe and services. + * Passed from Host to Target through QMI message and must be in + * little endian format. + */ +struct service_to_pipe { + __le32 service_id; + __le32 pipedir; + __le32 pipenum; +}; + +/* Configuration information for a Copy Engine pipe. + * Passed from Host to Target through QMI message during startup (one per CE). + * + * NOTE: Structure is shared between Host software and Target firmware! + */ +struct ce_pipe_config { + __le32 pipenum; + __le32 pipedir; + __le32 nentries; + __le32 nbytes_max; + __le32 flags; + __le32 reserved; +}; + +struct ce_attr { + /* CE_ATTR_* values */ + unsigned int flags; + + /* #entries in source ring - Must be a power of 2 */ + unsigned int src_nentries; + + /* Max source send size for this CE. + * This is also the minimum size of a destination buffer. + */ + unsigned int src_sz_max; + + /* #entries in destination ring - Must be a power of 2 */ + unsigned int dest_nentries; + + void (*recv_cb)(struct ath12k_base *ab, struct sk_buff *skb); +}; + +#define CE_DESC_RING_ALIGN 8 + +struct ath12k_ce_ring { + /* Number of entries in this ring; must be power of 2 */ + unsigned int nentries; + unsigned int nentries_mask; + + /* For dest ring, this is the next index to be processed + * by software after it was/is received into. + * + * For src ring, this is the last descriptor that was sent + * and completion processed by software. + * + * Regardless of src or dest ring, this is an invariant + * (modulo ring size): + * write index >= read index >= sw_index + */ + unsigned int sw_index; + /* cached copy */ + unsigned int write_index; + + /* Start of DMA-coherent area reserved for descriptors */ + /* Host address space */ + void *base_addr_owner_space_unaligned; + /* CE address space */ + u32 base_addr_ce_space_unaligned; + + /* Actual start of descriptors. + * Aligned to descriptor-size boundary. + * Points into reserved DMA-coherent area, above. 
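+	 *
+	 * With CE_DESC_RING_ALIGN at 8, PTR_ALIGN() skips at most 7
+	 * bytes, which the extra CE_DESC_RING_ALIGN bytes allocated in
+	 * ath12k_ce_alloc_ring() are there to cover.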
+ */ + /* Host address space */ + void *base_addr_owner_space; + + /* CE address space */ + u32 base_addr_ce_space; + + /* HAL ring id */ + u32 hal_ring_id; + + /* keep last */ + struct sk_buff *skb[]; +}; + +struct ath12k_ce_pipe { + struct ath12k_base *ab; + u16 pipe_num; + unsigned int attr_flags; + unsigned int buf_sz; + unsigned int rx_buf_needed; + + void (*send_cb)(struct ath12k_ce_pipe *pipe); + void (*recv_cb)(struct ath12k_base *ab, struct sk_buff *skb); + + struct tasklet_struct intr_tq; + struct ath12k_ce_ring *src_ring; + struct ath12k_ce_ring *dest_ring; + struct ath12k_ce_ring *status_ring; + u64 timestamp; +}; + +struct ath12k_ce { + struct ath12k_ce_pipe ce_pipe[CE_COUNT_MAX]; + /* Protects rings of all ce pipes */ + spinlock_t ce_lock; + struct ath12k_hp_update_timer hp_timer[CE_COUNT_MAX]; +}; + +extern const struct ce_attr ath12k_host_ce_config_qcn9274[]; +extern const struct ce_attr ath12k_host_ce_config_wcn7850[]; + +void ath12k_ce_cleanup_pipes(struct ath12k_base *ab); +void ath12k_ce_rx_replenish_retry(struct timer_list *t); +void ath12k_ce_per_engine_service(struct ath12k_base *ab, u16 ce_id); +int ath12k_ce_send(struct ath12k_base *ab, struct sk_buff *skb, u8 pipe_id, + u16 transfer_id); +void ath12k_ce_rx_post_buf(struct ath12k_base *ab); +int ath12k_ce_init_pipes(struct ath12k_base *ab); +int ath12k_ce_alloc_pipes(struct ath12k_base *ab); +void ath12k_ce_free_pipes(struct ath12k_base *ab); +int ath12k_ce_get_attr_flags(struct ath12k_base *ab, int ce_id); +void ath12k_ce_poll_send_completed(struct ath12k_base *ab, u8 pipe_id); +int ath12k_ce_map_service_to_pipe(struct ath12k_base *ab, u16 service_id, + u8 *ul_pipe, u8 *dl_pipe); +int ath12k_ce_attr_attach(struct ath12k_base *ab); +void ath12k_ce_get_shadow_config(struct ath12k_base *ab, + u32 **shadow_cfg, u32 *shadow_cfg_len); +#endif diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c new file mode 100644 index 000000000000..a89e66653f04 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/core.c @@ -0,0 +1,939 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/remoteproc.h> +#include <linux/firmware.h> +#include <linux/of.h> +#include "core.h" +#include "dp_tx.h" +#include "dp_rx.h" +#include "debug.h" +#include "hif.h" + +unsigned int ath12k_debug_mask; +module_param_named(debug_mask, ath12k_debug_mask, uint, 0644); +MODULE_PARM_DESC(debug_mask, "Debugging mask"); + +int ath12k_core_suspend(struct ath12k_base *ab) +{ + int ret; + + if (!ab->hw_params->supports_suspend) + return -EOPNOTSUPP; + + /* TODO: there can frames in queues so for now add delay as a hack. + * Need to implement to handle and remove this delay. 
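+	 *
+	 * That is: frames may still be queued in firmware and the driver
+	 * has no handshake yet to detect when they have drained, so the
+	 * msleep(500) below is only a fixed grace period, not a
+	 * guarantee.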
+ */ + msleep(500); + + ret = ath12k_dp_rx_pktlog_stop(ab, true); + if (ret) { + ath12k_warn(ab, "failed to stop dp rx (and timer) pktlog during suspend: %d\n", + ret); + return ret; + } + + ret = ath12k_dp_rx_pktlog_stop(ab, false); + if (ret) { + ath12k_warn(ab, "failed to stop dp rx pktlog during suspend: %d\n", + ret); + return ret; + } + + ath12k_hif_irq_disable(ab); + ath12k_hif_ce_irq_disable(ab); + + ret = ath12k_hif_suspend(ab); + if (ret) { + ath12k_warn(ab, "failed to suspend hif: %d\n", ret); + return ret; + } + + return 0; +} + +int ath12k_core_resume(struct ath12k_base *ab) +{ + int ret; + + if (!ab->hw_params->supports_suspend) + return -EOPNOTSUPP; + + ret = ath12k_hif_resume(ab); + if (ret) { + ath12k_warn(ab, "failed to resume hif during resume: %d\n", ret); + return ret; + } + + ath12k_hif_ce_irq_enable(ab); + ath12k_hif_irq_enable(ab); + + ret = ath12k_dp_rx_pktlog_start(ab); + if (ret) { + ath12k_warn(ab, "failed to start rx pktlog during resume: %d\n", + ret); + return ret; + } + + return 0; +} + +static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name, + size_t name_len) +{ + /* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */ + char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 }; + + if (ab->qmi.target.bdf_ext[0] != '\0') + scnprintf(variant, sizeof(variant), ",variant=%s", + ab->qmi.target.bdf_ext); + + scnprintf(name, name_len, + "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s", + ath12k_bus_str(ab->hif.bus), + ab->qmi.target.chip_id, + ab->qmi.target.board_id, variant); + + ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot using board name '%s'\n", name); + + return 0; +} + +const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab, + const char *file) +{ + const struct firmware *fw; + char path[100]; + int ret; + + if (!file) + return ERR_PTR(-ENOENT); + + ath12k_core_create_firmware_path(ab, file, path, sizeof(path)); + + ret = firmware_request_nowarn(&fw, path, ab->dev); + if (ret) + return ERR_PTR(ret); + + ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot firmware request %s size %zu\n", + path, fw->size); + + return fw; +} + +void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd) +{ + if (!IS_ERR(bd->fw)) + release_firmware(bd->fw); + + memset(bd, 0, sizeof(*bd)); +} + +static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab, + struct ath12k_board_data *bd, + const void *buf, size_t buf_len, + const char *boardname, + int bd_ie_type) +{ + const struct ath12k_fw_ie *hdr; + bool name_match_found; + int ret, board_ie_id; + size_t board_ie_len; + const void *board_ie_data; + + name_match_found = false; + + /* go through ATH12K_BD_IE_BOARD_ elements */ + while (buf_len > sizeof(struct ath12k_fw_ie)) { + hdr = buf; + board_ie_id = le32_to_cpu(hdr->id); + board_ie_len = le32_to_cpu(hdr->len); + board_ie_data = hdr->data; + + buf_len -= sizeof(*hdr); + buf += sizeof(*hdr); + + if (buf_len < ALIGN(board_ie_len, 4)) { + ath12k_err(ab, "invalid ATH12K_BD_IE_BOARD length: %zu < %zu\n", + buf_len, ALIGN(board_ie_len, 4)); + ret = -EINVAL; + goto out; + } + + switch (board_ie_id) { + case ATH12K_BD_IE_BOARD_NAME: + ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "board name", "", + board_ie_data, board_ie_len); + + if (board_ie_len != strlen(boardname)) + break; + + ret = memcmp(board_ie_data, boardname, strlen(boardname)); + if (ret) + break; + + name_match_found = true; + ath12k_dbg(ab, ATH12K_DBG_BOOT, + "boot found match for name '%s'", + boardname); + break; + case ATH12K_BD_IE_BOARD_DATA: + if (!name_match_found) + /* no match 
found */ + break; + + ath12k_dbg(ab, ATH12K_DBG_BOOT, + "boot found board data for '%s'", boardname); + + bd->data = board_ie_data; + bd->len = board_ie_len; + + ret = 0; + goto out; + default: + ath12k_warn(ab, "unknown ATH12K_BD_IE_BOARD found: %d\n", + board_ie_id); + break; + } + + /* jump over the padding */ + board_ie_len = ALIGN(board_ie_len, 4); + + buf_len -= board_ie_len; + buf += board_ie_len; + } + + /* no match found */ + ret = -ENOENT; + +out: + return ret; +} + +static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab, + struct ath12k_board_data *bd, + const char *boardname) +{ + size_t len, magic_len; + const u8 *data; + char *filename, filepath[100]; + size_t ie_len; + struct ath12k_fw_ie *hdr; + int ret, ie_id; + + filename = ATH12K_BOARD_API2_FILE; + + if (!bd->fw) + bd->fw = ath12k_core_firmware_request(ab, filename); + + if (IS_ERR(bd->fw)) + return PTR_ERR(bd->fw); + + data = bd->fw->data; + len = bd->fw->size; + + ath12k_core_create_firmware_path(ab, filename, + filepath, sizeof(filepath)); + + /* magic has extra null byte padded */ + magic_len = strlen(ATH12K_BOARD_MAGIC) + 1; + if (len < magic_len) { + ath12k_err(ab, "failed to find magic value in %s, file too short: %zu\n", + filepath, len); + ret = -EINVAL; + goto err; + } + + if (memcmp(data, ATH12K_BOARD_MAGIC, magic_len)) { + ath12k_err(ab, "found invalid board magic\n"); + ret = -EINVAL; + goto err; + } + + /* magic is padded to 4 bytes */ + magic_len = ALIGN(magic_len, 4); + if (len < magic_len) { + ath12k_err(ab, "failed: %s too small to contain board data, len: %zu\n", + filepath, len); + ret = -EINVAL; + goto err; + } + + data += magic_len; + len -= magic_len; + + while (len > sizeof(struct ath12k_fw_ie)) { + hdr = (struct ath12k_fw_ie *)data; + ie_id = le32_to_cpu(hdr->id); + ie_len = le32_to_cpu(hdr->len); + + len -= sizeof(*hdr); + data = hdr->data; + + if (len < ALIGN(ie_len, 4)) { + ath12k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n", + ie_id, ie_len, len); + ret = -EINVAL; + goto err; + } + + switch (ie_id) { + case ATH12K_BD_IE_BOARD: + ret = ath12k_core_parse_bd_ie_board(ab, bd, data, + ie_len, + boardname, + ATH12K_BD_IE_BOARD); + if (ret == -ENOENT) + /* no match found, continue */ + break; + else if (ret) + /* there was an error, bail out */ + goto err; + /* either found or error, so stop searching */ + goto out; + } + + /* jump over the padding */ + ie_len = ALIGN(ie_len, 4); + + len -= ie_len; + data += ie_len; + } + +out: + if (!bd->data || !bd->len) { + ath12k_err(ab, + "failed to fetch board data for %s from %s\n", + boardname, filepath); + ret = -ENODATA; + goto err; + } + + return 0; + +err: + ath12k_core_free_bdf(ab, bd); + return ret; +} + +int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab, + struct ath12k_board_data *bd, + char *filename) +{ + bd->fw = ath12k_core_firmware_request(ab, filename); + if (IS_ERR(bd->fw)) + return PTR_ERR(bd->fw); + + bd->data = bd->fw->data; + bd->len = bd->fw->size; + + return 0; +} + +#define BOARD_NAME_SIZE 100 +int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd) +{ + char boardname[BOARD_NAME_SIZE]; + int ret; + + ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE); + if (ret) { + ath12k_err(ab, "failed to create board name: %d", ret); + return ret; + } + + ab->bd_api = 2; + ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname); + if (!ret) + goto success; + + ab->bd_api = 1; + ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_DEFAULT_BOARD_FILE); + if 
(ret) {
+		ath12k_err(ab, "failed to fetch board-2.bin or board.bin from %s\n",
+			   ab->hw_params->fw.dir);
+		return ret;
+	}
+
+success:
+	ath12k_dbg(ab, ATH12K_DBG_BOOT, "using board api %d\n", ab->bd_api);
+	return 0;
+}
+
+static void ath12k_core_stop(struct ath12k_base *ab)
+{
+	if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+		ath12k_qmi_firmware_stop(ab);
+
+	ath12k_hif_stop(ab);
+	ath12k_wmi_detach(ab);
+	ath12k_dp_rx_pdev_reo_cleanup(ab);
+
+	/* De-Init of components as needed */
+}
+
+static int ath12k_core_soc_create(struct ath12k_base *ab)
+{
+	int ret;
+
+	ret = ath12k_qmi_init_service(ab);
+	if (ret) {
+		ath12k_err(ab, "failed to initialize qmi: %d\n", ret);
+		return ret;
+	}
+
+	ret = ath12k_hif_power_up(ab);
+	if (ret) {
+		ath12k_err(ab, "failed to power up: %d\n", ret);
+		goto err_qmi_deinit;
+	}
+
+	return 0;
+
+err_qmi_deinit:
+	ath12k_qmi_deinit_service(ab);
+	return ret;
+}
+
+static void ath12k_core_soc_destroy(struct ath12k_base *ab)
+{
+	ath12k_dp_free(ab);
+	ath12k_reg_free(ab);
+	ath12k_qmi_deinit_service(ab);
+}
+
+static int ath12k_core_pdev_create(struct ath12k_base *ab)
+{
+	int ret;
+
+	ret = ath12k_mac_register(ab);
+	if (ret) {
+		ath12k_err(ab, "failed to register the radio with mac80211: %d\n", ret);
+		return ret;
+	}
+
+	ret = ath12k_dp_pdev_alloc(ab);
+	if (ret) {
+		ath12k_err(ab, "failed to attach DP pdev: %d\n", ret);
+		goto err_mac_unregister;
+	}
+
+	return 0;
+
+err_mac_unregister:
+	ath12k_mac_unregister(ab);
+
+	return ret;
+}
+
+static void ath12k_core_pdev_destroy(struct ath12k_base *ab)
+{
+	ath12k_mac_unregister(ab);
+	ath12k_hif_irq_disable(ab);
+	ath12k_dp_pdev_free(ab);
+}
+
+static int ath12k_core_start(struct ath12k_base *ab,
+			     enum ath12k_firmware_mode mode)
+{
+	int ret;
+
+	ret = ath12k_wmi_attach(ab);
+	if (ret) {
+		ath12k_err(ab, "failed to attach wmi: %d\n", ret);
+		return ret;
+	}
+
+	ret = ath12k_htc_init(ab);
+	if (ret) {
+		ath12k_err(ab, "failed to init htc: %d\n", ret);
+		goto err_wmi_detach;
+	}
+
+	ret = ath12k_hif_start(ab);
+	if (ret) {
+		ath12k_err(ab, "failed to start HIF: %d\n", ret);
+		goto err_wmi_detach;
+	}
+
+	ret = ath12k_htc_wait_target(&ab->htc);
+	if (ret) {
+		ath12k_err(ab, "failed to connect to HTC: %d\n", ret);
+		goto err_hif_stop;
+	}
+
+	ret = ath12k_dp_htt_connect(&ab->dp);
+	if (ret) {
+		ath12k_err(ab, "failed to connect to HTT: %d\n", ret);
+		goto err_hif_stop;
+	}
+
+	ret = ath12k_wmi_connect(ab);
+	if (ret) {
+		ath12k_err(ab, "failed to connect wmi: %d\n", ret);
+		goto err_hif_stop;
+	}
+
+	ret = ath12k_htc_start(&ab->htc);
+	if (ret) {
+		ath12k_err(ab, "failed to start HTC: %d\n", ret);
+		goto err_hif_stop;
+	}
+
+	ret = ath12k_wmi_wait_for_service_ready(ab);
+	if (ret) {
+		ath12k_err(ab, "failed to receive wmi service ready event: %d\n",
+			   ret);
+		goto err_hif_stop;
+	}
+
+	ret = ath12k_mac_allocate(ab);
+	if (ret) {
+		ath12k_err(ab, "failed to create new hw device with mac80211: %d\n",
+			   ret);
+		goto err_hif_stop;
+	}
+
+	ath12k_dp_cc_config(ab);
+
+	ath12k_dp_pdev_pre_alloc(ab);
+
+	ret = ath12k_dp_rx_pdev_reo_setup(ab);
+	if (ret) {
+		ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
+		goto err_mac_destroy;
+	}
+
+	ret = ath12k_wmi_cmd_init(ab);
+	if (ret) {
+		ath12k_err(ab, "failed to send wmi init cmd: %d\n", ret);
+		goto err_reo_cleanup;
+	}
+
+	ret = ath12k_wmi_wait_for_unified_ready(ab);
+	if (ret) {
+		ath12k_err(ab, "failed to receive wmi unified ready event: %d\n",
+			   ret);
+		goto err_reo_cleanup;
+	}
+
+	/* put hardware into DBS mode */
+	if
(ab->hw_params->single_pdev_only) { + ret = ath12k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS); + if (ret) { + ath12k_err(ab, "failed to send dbs mode: %d\n", ret); + goto err_reo_cleanup; + } + } + + ret = ath12k_dp_tx_htt_h2t_ver_req_msg(ab); + if (ret) { + ath12k_err(ab, "failed to send htt version request message: %d\n", + ret); + goto err_reo_cleanup; + } + + return 0; + +err_reo_cleanup: + ath12k_dp_rx_pdev_reo_cleanup(ab); +err_mac_destroy: + ath12k_mac_destroy(ab); +err_hif_stop: + ath12k_hif_stop(ab); +err_wmi_detach: + ath12k_wmi_detach(ab); + return ret; +} + +static int ath12k_core_start_firmware(struct ath12k_base *ab, + enum ath12k_firmware_mode mode) +{ + int ret; + + ath12k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v3, + &ab->qmi.ce_cfg.shadow_reg_v3_len); + + ret = ath12k_qmi_firmware_start(ab, mode); + if (ret) { + ath12k_err(ab, "failed to send firmware start: %d\n", ret); + return ret; + } + + return ret; +} + +int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab) +{ + int ret; + + ret = ath12k_core_start_firmware(ab, ATH12K_FIRMWARE_MODE_NORMAL); + if (ret) { + ath12k_err(ab, "failed to start firmware: %d\n", ret); + return ret; + } + + ret = ath12k_ce_init_pipes(ab); + if (ret) { + ath12k_err(ab, "failed to initialize CE: %d\n", ret); + goto err_firmware_stop; + } + + ret = ath12k_dp_alloc(ab); + if (ret) { + ath12k_err(ab, "failed to init DP: %d\n", ret); + goto err_firmware_stop; + } + + mutex_lock(&ab->core_lock); + ret = ath12k_core_start(ab, ATH12K_FIRMWARE_MODE_NORMAL); + if (ret) { + ath12k_err(ab, "failed to start core: %d\n", ret); + goto err_dp_free; + } + + ret = ath12k_core_pdev_create(ab); + if (ret) { + ath12k_err(ab, "failed to create pdev core: %d\n", ret); + goto err_core_stop; + } + ath12k_hif_irq_enable(ab); + mutex_unlock(&ab->core_lock); + + return 0; + +err_core_stop: + ath12k_core_stop(ab); + ath12k_mac_destroy(ab); +err_dp_free: + ath12k_dp_free(ab); + mutex_unlock(&ab->core_lock); +err_firmware_stop: + ath12k_qmi_firmware_stop(ab); + + return ret; +} + +static int ath12k_core_reconfigure_on_crash(struct ath12k_base *ab) +{ + int ret; + + mutex_lock(&ab->core_lock); + ath12k_hif_irq_disable(ab); + ath12k_dp_pdev_free(ab); + ath12k_hif_stop(ab); + ath12k_wmi_detach(ab); + ath12k_dp_rx_pdev_reo_cleanup(ab); + mutex_unlock(&ab->core_lock); + + ath12k_dp_free(ab); + ath12k_hal_srng_deinit(ab); + + ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1; + + ret = ath12k_hal_srng_init(ab); + if (ret) + return ret; + + clear_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags); + + ret = ath12k_core_qmi_firmware_ready(ab); + if (ret) + goto err_hal_srng_deinit; + + clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags); + + return 0; + +err_hal_srng_deinit: + ath12k_hal_srng_deinit(ab); + return ret; +} + +void ath12k_core_halt(struct ath12k *ar) +{ + struct ath12k_base *ab = ar->ab; + + lockdep_assert_held(&ar->conf_mutex); + + ar->num_created_vdevs = 0; + ar->allocated_vdev_map = 0; + + ath12k_mac_scan_finish(ar); + ath12k_mac_peer_cleanup_all(ar); + cancel_delayed_work_sync(&ar->scan.timeout); + cancel_work_sync(&ar->regd_update_work); + + rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL); + synchronize_rcu(); + INIT_LIST_HEAD(&ar->arvifs); + idr_init(&ar->txmgmt_idr); +} + +static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab) +{ + struct ath12k *ar; + struct ath12k_pdev *pdev; + int i; + + spin_lock_bh(&ab->base_lock); + ab->stats.fw_crash_counter++; + spin_unlock_bh(&ab->base_lock); + + for (i = 0; i < 
ab->num_radios; i++) {
+		pdev = &ab->pdevs[i];
+		ar = pdev->ar;
+		if (!ar || ar->state == ATH12K_STATE_OFF)
+			continue;
+
+		ieee80211_stop_queues(ar->hw);
+		ath12k_mac_drain_tx(ar);
+		complete(&ar->scan.started);
+		complete(&ar->scan.completed);
+		complete(&ar->peer_assoc_done);
+		complete(&ar->peer_delete_done);
+		complete(&ar->install_key_done);
+		complete(&ar->vdev_setup_done);
+		complete(&ar->vdev_delete_done);
+		complete(&ar->bss_survey_done);
+
+		wake_up(&ar->dp.tx_empty_waitq);
+		idr_for_each(&ar->txmgmt_idr,
+			     ath12k_mac_tx_mgmt_pending_free, ar);
+		idr_destroy(&ar->txmgmt_idr);
+	}
+
+	wake_up(&ab->wmi_ab.tx_credits_wq);
+	wake_up(&ab->peer_mapping_wq);
+}
+
+static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
+{
+	struct ath12k *ar;
+	struct ath12k_pdev *pdev;
+	int i;
+
+	for (i = 0; i < ab->num_radios; i++) {
+		pdev = &ab->pdevs[i];
+		ar = pdev->ar;
+		if (!ar || ar->state == ATH12K_STATE_OFF)
+			continue;
+
+		mutex_lock(&ar->conf_mutex);
+
+		switch (ar->state) {
+		case ATH12K_STATE_ON:
+			ar->state = ATH12K_STATE_RESTARTING;
+			ath12k_core_halt(ar);
+			ieee80211_restart_hw(ar->hw);
+			break;
+		case ATH12K_STATE_OFF:
+			ath12k_warn(ab,
+				    "cannot restart radio %d that hasn't been started\n",
+				    i);
+			break;
+		case ATH12K_STATE_RESTARTING:
+			break;
+		case ATH12K_STATE_RESTARTED:
+			ar->state = ATH12K_STATE_WEDGED;
+			fallthrough;
+		case ATH12K_STATE_WEDGED:
+			ath12k_warn(ab,
+				    "device is wedged, will not restart radio %d\n", i);
+			break;
+		}
+		mutex_unlock(&ar->conf_mutex);
+	}
+	complete(&ab->driver_recovery);
+}
+
+static void ath12k_core_restart(struct work_struct *work)
+{
+	struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work);
+	int ret;
+
+	if (!ab->is_reset)
+		ath12k_core_pre_reconfigure_recovery(ab);
+
+	ret = ath12k_core_reconfigure_on_crash(ab);
+	if (ret) {
+		ath12k_err(ab, "failed to reconfigure driver on crash recovery\n");
+		return;
+	}
+
+	if (ab->is_reset)
+		complete_all(&ab->reconfigure_complete);
+
+	if (!ab->is_reset)
+		ath12k_core_post_reconfigure_recovery(ab);
+}
+
+static void ath12k_core_reset(struct work_struct *work)
+{
+	struct ath12k_base *ab = container_of(work, struct ath12k_base, reset_work);
+	int reset_count, fail_cont_count;
+	long time_left;
+
+	if (!(test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))) {
+		ath12k_warn(ab, "ignoring reset, dev flags 0x%lx\n", ab->dev_flags);
+		return;
+	}
+
+	/* Recovery may fail, and every subsequent attempt may then fail
+	 * as well; bail out here so a recovery that can no longer
+	 * succeed does not loop forever.
+	 */
+	fail_cont_count = atomic_read(&ab->fail_cont_count);
+
+	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FINAL)
+		return;
+
+	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FIRST &&
+	    time_before(jiffies, ab->reset_fail_timeout))
+		return;
+
+	reset_count = atomic_inc_return(&ab->reset_count);
+
+	if (reset_count > 1) {
+		/* A new reset worker can be scheduled before the previous
+		 * one has completed, and the second worker would then tear
+		 * down state the first is still using; the wait below
+		 * avoids that.
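+		 * If the earlier worker signals reset_complete before the
+		 * timeout expires, wait_for_completion_timeout() returns
+		 * non-zero and this worker backs off, undoing its
+		 * reset_count increment.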
+ */ + ath12k_warn(ab, "already resetting count %d\n", reset_count); + + reinit_completion(&ab->reset_complete); + time_left = wait_for_completion_timeout(&ab->reset_complete, + ATH12K_RESET_TIMEOUT_HZ); + if (time_left) { + ath12k_dbg(ab, ATH12K_DBG_BOOT, "to skip reset\n"); + atomic_dec(&ab->reset_count); + return; + } + + ab->reset_fail_timeout = jiffies + ATH12K_RESET_FAIL_TIMEOUT_HZ; + /* Record the continuous recovery fail count when recovery failed*/ + fail_cont_count = atomic_inc_return(&ab->fail_cont_count); + } + + ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset starting\n"); + + ab->is_reset = true; + atomic_set(&ab->recovery_count, 0); + + ath12k_core_pre_reconfigure_recovery(ab); + + reinit_completion(&ab->reconfigure_complete); + ath12k_core_post_reconfigure_recovery(ab); + + reinit_completion(&ab->recovery_start); + atomic_set(&ab->recovery_start_count, 0); + + ath12k_dbg(ab, ATH12K_DBG_BOOT, "waiting recovery start...\n"); + + time_left = wait_for_completion_timeout(&ab->recovery_start, + ATH12K_RECOVER_START_TIMEOUT_HZ); + + ath12k_hif_power_down(ab); + ath12k_hif_power_up(ab); + + ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n"); +} + +int ath12k_core_pre_init(struct ath12k_base *ab) +{ + int ret; + + ret = ath12k_hw_init(ab); + if (ret) { + ath12k_err(ab, "failed to init hw params: %d\n", ret); + return ret; + } + + return 0; +} + +int ath12k_core_init(struct ath12k_base *ab) +{ + int ret; + + ret = ath12k_core_soc_create(ab); + if (ret) { + ath12k_err(ab, "failed to create soc core: %d\n", ret); + return ret; + } + + return 0; +} + +void ath12k_core_deinit(struct ath12k_base *ab) +{ + mutex_lock(&ab->core_lock); + + ath12k_core_pdev_destroy(ab); + ath12k_core_stop(ab); + + mutex_unlock(&ab->core_lock); + + ath12k_hif_power_down(ab); + ath12k_mac_destroy(ab); + ath12k_core_soc_destroy(ab); +} + +void ath12k_core_free(struct ath12k_base *ab) +{ + destroy_workqueue(ab->workqueue_aux); + destroy_workqueue(ab->workqueue); + kfree(ab); +} + +struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size, + enum ath12k_bus bus) +{ + struct ath12k_base *ab; + + ab = kzalloc(sizeof(*ab) + priv_size, GFP_KERNEL); + if (!ab) + return NULL; + + init_completion(&ab->driver_recovery); + + ab->workqueue = create_singlethread_workqueue("ath12k_wq"); + if (!ab->workqueue) + goto err_sc_free; + + ab->workqueue_aux = create_singlethread_workqueue("ath12k_aux_wq"); + if (!ab->workqueue_aux) + goto err_free_wq; + + mutex_init(&ab->core_lock); + spin_lock_init(&ab->base_lock); + init_completion(&ab->reset_complete); + init_completion(&ab->reconfigure_complete); + init_completion(&ab->recovery_start); + + INIT_LIST_HEAD(&ab->peers); + init_waitqueue_head(&ab->peer_mapping_wq); + init_waitqueue_head(&ab->wmi_ab.tx_credits_wq); + INIT_WORK(&ab->restart_work, ath12k_core_restart); + INIT_WORK(&ab->reset_work, ath12k_core_reset); + timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0); + init_completion(&ab->htc_suspend); + + ab->dev = dev; + ab->hif.bus = bus; + + return ab; + +err_free_wq: + destroy_workqueue(ab->workqueue); +err_sc_free: + kfree(ab); + return NULL; +} + +MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11be wireless LAN cards."); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h new file mode 100644 index 000000000000..a54ae74543c1 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/core.h @@ -0,0 +1,822 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * 
Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef ATH12K_CORE_H +#define ATH12K_CORE_H + +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/bitfield.h> +#include "qmi.h" +#include "htc.h" +#include "wmi.h" +#include "hal.h" +#include "dp.h" +#include "ce.h" +#include "mac.h" +#include "hw.h" +#include "hal_rx.h" +#include "reg.h" +#include "dbring.h" + +#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK) + +#define ATH12K_TX_MGMT_NUM_PENDING_MAX 512 + +#define ATH12K_TX_MGMT_TARGET_MAX_SUPPORT_WMI 64 + +/* Pending management packets threshold for dropping probe responses */ +#define ATH12K_PRB_RSP_DROP_THRESHOLD ((ATH12K_TX_MGMT_TARGET_MAX_SUPPORT_WMI * 3) / 4) + +#define ATH12K_INVALID_HW_MAC_ID 0xFF +#define ATH12K_RX_RATE_TABLE_NUM 320 +#define ATH12K_RX_RATE_TABLE_11AX_NUM 576 + +#define ATH12K_MON_TIMER_INTERVAL 10 +#define ATH12K_RESET_TIMEOUT_HZ (20 * HZ) +#define ATH12K_RESET_MAX_FAIL_COUNT_FIRST 3 +#define ATH12K_RESET_MAX_FAIL_COUNT_FINAL 5 +#define ATH12K_RESET_FAIL_TIMEOUT_HZ (20 * HZ) +#define ATH12K_RECONFIGURE_TIMEOUT_HZ (10 * HZ) +#define ATH12K_RECOVER_START_TIMEOUT_HZ (20 * HZ) + +enum wme_ac { + WME_AC_BE, + WME_AC_BK, + WME_AC_VI, + WME_AC_VO, + WME_NUM_AC +}; + +#define ATH12K_HT_MCS_MAX 7 +#define ATH12K_VHT_MCS_MAX 9 +#define ATH12K_HE_MCS_MAX 11 + +enum ath12k_crypt_mode { + /* Only use hardware crypto engine */ + ATH12K_CRYPT_MODE_HW, + /* Only use software crypto */ + ATH12K_CRYPT_MODE_SW, +}; + +static inline enum wme_ac ath12k_tid_to_ac(u32 tid) +{ + return (((tid == 0) || (tid == 3)) ? WME_AC_BE : + ((tid == 1) || (tid == 2)) ? WME_AC_BK : + ((tid == 4) || (tid == 5)) ? 
WME_AC_VI : + WME_AC_VO); +} + +enum ath12k_skb_flags { + ATH12K_SKB_HW_80211_ENCAP = BIT(0), + ATH12K_SKB_CIPHER_SET = BIT(1), +}; + +struct ath12k_skb_cb { + dma_addr_t paddr; + struct ath12k *ar; + struct ieee80211_vif *vif; + dma_addr_t paddr_ext_desc; + u32 cipher; + u8 flags; +}; + +struct ath12k_skb_rxcb { + dma_addr_t paddr; + bool is_first_msdu; + bool is_last_msdu; + bool is_continuation; + bool is_mcbc; + bool is_eapol; + struct hal_rx_desc *rx_desc; + u8 err_rel_src; + u8 err_code; + u8 mac_id; + u8 unmapped; + u8 is_frag; + u8 tid; + u16 peer_id; +}; + +enum ath12k_hw_rev { + ATH12K_HW_QCN9274_HW10, + ATH12K_HW_QCN9274_HW20, + ATH12K_HW_WCN7850_HW20 +}; + +enum ath12k_firmware_mode { + /* the default mode, standard 802.11 functionality */ + ATH12K_FIRMWARE_MODE_NORMAL, + + /* factory tests etc */ + ATH12K_FIRMWARE_MODE_FTM, +}; + +#define ATH12K_IRQ_NUM_MAX 57 +#define ATH12K_EXT_IRQ_NUM_MAX 16 + +struct ath12k_ext_irq_grp { + struct ath12k_base *ab; + u32 irqs[ATH12K_EXT_IRQ_NUM_MAX]; + u32 num_irq; + u32 grp_id; + u64 timestamp; + struct napi_struct napi; + struct net_device napi_ndev; +}; + +#define HEHANDLE_CAP_PHYINFO_SIZE 3 +#define HECAP_PHYINFO_SIZE 9 +#define HECAP_MACINFO_SIZE 5 +#define HECAP_TXRX_MCS_NSS_SIZE 2 +#define HECAP_PPET16_PPET8_MAX_SIZE 25 + +#define HE_PPET16_PPET8_SIZE 8 + +/* 802.11ax PPE (PPDU packet Extension) threshold */ +struct he_ppe_threshold { + u32 numss_m1; + u32 ru_mask; + u32 ppet16_ppet8_ru3_ru0[HE_PPET16_PPET8_SIZE]; +}; + +struct ath12k_he { + u8 hecap_macinfo[HECAP_MACINFO_SIZE]; + u32 hecap_rxmcsnssmap; + u32 hecap_txmcsnssmap; + u32 hecap_phyinfo[HEHANDLE_CAP_PHYINFO_SIZE]; + struct he_ppe_threshold hecap_ppet; + u32 heop_param; +}; + +#define MAX_RADIOS 3 + +enum { + WMI_HOST_TP_SCALE_MAX = 0, + WMI_HOST_TP_SCALE_50 = 1, + WMI_HOST_TP_SCALE_25 = 2, + WMI_HOST_TP_SCALE_12 = 3, + WMI_HOST_TP_SCALE_MIN = 4, + WMI_HOST_TP_SCALE_SIZE = 5, +}; + +enum ath12k_scan_state { + ATH12K_SCAN_IDLE, + ATH12K_SCAN_STARTING, + ATH12K_SCAN_RUNNING, + ATH12K_SCAN_ABORTING, +}; + +enum ath12k_dev_flags { + ATH12K_CAC_RUNNING, + ATH12K_FLAG_CRASH_FLUSH, + ATH12K_FLAG_RAW_MODE, + ATH12K_FLAG_HW_CRYPTO_DISABLED, + ATH12K_FLAG_RECOVERY, + ATH12K_FLAG_UNREGISTERING, + ATH12K_FLAG_REGISTERED, + ATH12K_FLAG_QMI_FAIL, + ATH12K_FLAG_HTC_SUSPEND_COMPLETE, +}; + +enum ath12k_monitor_flags { + ATH12K_FLAG_MONITOR_ENABLED, +}; + +struct ath12k_vif { + u32 vdev_id; + enum wmi_vdev_type vdev_type; + enum wmi_vdev_subtype vdev_subtype; + u32 beacon_interval; + u32 dtim_period; + u16 ast_hash; + u16 ast_idx; + u16 tcl_metadata; + u8 hal_addr_search_flags; + u8 search_type; + + struct ath12k *ar; + struct ieee80211_vif *vif; + + int bank_id; + u8 vdev_id_check_en; + + struct wmi_wmm_params_all_arg wmm_params; + struct list_head list; + union { + struct { + u32 uapsd; + } sta; + struct { + /* 127 stations; wmi limit */ + u8 tim_bitmap[16]; + u8 tim_len; + u32 ssid_len; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + bool hidden_ssid; + /* P2P_IE with NoA attribute for P2P_GO case */ + u32 noa_len; + u8 *noa_data; + } ap; + } u; + + bool is_started; + bool is_up; + u32 aid; + u8 bssid[ETH_ALEN]; + struct cfg80211_bitrate_mask bitrate_mask; + int num_legacy_stations; + int rtscts_prot_mode; + int txpower; + bool rsnie_present; + bool wpaie_present; + struct ieee80211_chanctx_conf chanctx; + u32 key_cipher; + u8 tx_encap_type; + u8 vdev_stats_id; +}; + +struct ath12k_vif_iter { + u32 vdev_id; + struct ath12k_vif *arvif; +}; + +#define HAL_AST_IDX_INVALID 0xFFFF +#define HAL_RX_MAX_MCS 
12 +#define HAL_RX_MAX_MCS_HT 31 +#define HAL_RX_MAX_MCS_VHT 9 +#define HAL_RX_MAX_MCS_HE 11 +#define HAL_RX_MAX_NSS 8 +#define HAL_RX_MAX_NUM_LEGACY_RATES 12 +#define ATH12K_RX_RATE_TABLE_11AX_NUM 576 +#define ATH12K_RX_RATE_TABLE_NUM 320 + +struct ath12k_rx_peer_rate_stats { + u64 ht_mcs_count[HAL_RX_MAX_MCS_HT + 1]; + u64 vht_mcs_count[HAL_RX_MAX_MCS_VHT + 1]; + u64 he_mcs_count[HAL_RX_MAX_MCS_HE + 1]; + u64 nss_count[HAL_RX_MAX_NSS]; + u64 bw_count[HAL_RX_BW_MAX]; + u64 gi_count[HAL_RX_GI_MAX]; + u64 legacy_count[HAL_RX_MAX_NUM_LEGACY_RATES]; + u64 rx_rate[ATH12K_RX_RATE_TABLE_11AX_NUM]; +}; + +struct ath12k_rx_peer_stats { + u64 num_msdu; + u64 num_mpdu_fcs_ok; + u64 num_mpdu_fcs_err; + u64 tcp_msdu_count; + u64 udp_msdu_count; + u64 other_msdu_count; + u64 ampdu_msdu_count; + u64 non_ampdu_msdu_count; + u64 stbc_count; + u64 beamformed_count; + u64 mcs_count[HAL_RX_MAX_MCS + 1]; + u64 nss_count[HAL_RX_MAX_NSS]; + u64 bw_count[HAL_RX_BW_MAX]; + u64 gi_count[HAL_RX_GI_MAX]; + u64 coding_count[HAL_RX_SU_MU_CODING_MAX]; + u64 tid_count[IEEE80211_NUM_TIDS + 1]; + u64 pream_cnt[HAL_RX_PREAMBLE_MAX]; + u64 reception_type[HAL_RX_RECEPTION_TYPE_MAX]; + u64 rx_duration; + u64 dcm_count; + u64 ru_alloc_cnt[HAL_RX_RU_ALLOC_TYPE_MAX]; + struct ath12k_rx_peer_rate_stats pkt_stats; + struct ath12k_rx_peer_rate_stats byte_stats; +}; + +#define ATH12K_HE_MCS_NUM 12 +#define ATH12K_VHT_MCS_NUM 10 +#define ATH12K_BW_NUM 5 +#define ATH12K_NSS_NUM 4 +#define ATH12K_LEGACY_NUM 12 +#define ATH12K_GI_NUM 4 +#define ATH12K_HT_MCS_NUM 32 + +enum ath12k_pkt_rx_err { + ATH12K_PKT_RX_ERR_FCS, + ATH12K_PKT_RX_ERR_TKIP, + ATH12K_PKT_RX_ERR_CRYPT, + ATH12K_PKT_RX_ERR_PEER_IDX_INVAL, + ATH12K_PKT_RX_ERR_MAX, +}; + +enum ath12k_ampdu_subfrm_num { + ATH12K_AMPDU_SUBFRM_NUM_10, + ATH12K_AMPDU_SUBFRM_NUM_20, + ATH12K_AMPDU_SUBFRM_NUM_30, + ATH12K_AMPDU_SUBFRM_NUM_40, + ATH12K_AMPDU_SUBFRM_NUM_50, + ATH12K_AMPDU_SUBFRM_NUM_60, + ATH12K_AMPDU_SUBFRM_NUM_MORE, + ATH12K_AMPDU_SUBFRM_NUM_MAX, +}; + +enum ath12k_amsdu_subfrm_num { + ATH12K_AMSDU_SUBFRM_NUM_1, + ATH12K_AMSDU_SUBFRM_NUM_2, + ATH12K_AMSDU_SUBFRM_NUM_3, + ATH12K_AMSDU_SUBFRM_NUM_4, + ATH12K_AMSDU_SUBFRM_NUM_MORE, + ATH12K_AMSDU_SUBFRM_NUM_MAX, +}; + +enum ath12k_counter_type { + ATH12K_COUNTER_TYPE_BYTES, + ATH12K_COUNTER_TYPE_PKTS, + ATH12K_COUNTER_TYPE_MAX, +}; + +enum ath12k_stats_type { + ATH12K_STATS_TYPE_SUCC, + ATH12K_STATS_TYPE_FAIL, + ATH12K_STATS_TYPE_RETRY, + ATH12K_STATS_TYPE_AMPDU, + ATH12K_STATS_TYPE_MAX, +}; + +struct ath12k_htt_data_stats { + u64 legacy[ATH12K_COUNTER_TYPE_MAX][ATH12K_LEGACY_NUM]; + u64 ht[ATH12K_COUNTER_TYPE_MAX][ATH12K_HT_MCS_NUM]; + u64 vht[ATH12K_COUNTER_TYPE_MAX][ATH12K_VHT_MCS_NUM]; + u64 he[ATH12K_COUNTER_TYPE_MAX][ATH12K_HE_MCS_NUM]; + u64 bw[ATH12K_COUNTER_TYPE_MAX][ATH12K_BW_NUM]; + u64 nss[ATH12K_COUNTER_TYPE_MAX][ATH12K_NSS_NUM]; + u64 gi[ATH12K_COUNTER_TYPE_MAX][ATH12K_GI_NUM]; + u64 transmit_type[ATH12K_COUNTER_TYPE_MAX][HAL_RX_RECEPTION_TYPE_MAX]; + u64 ru_loc[ATH12K_COUNTER_TYPE_MAX][HAL_RX_RU_ALLOC_TYPE_MAX]; +}; + +struct ath12k_htt_tx_stats { + struct ath12k_htt_data_stats stats[ATH12K_STATS_TYPE_MAX]; + u64 tx_duration; + u64 ba_fails; + u64 ack_fails; + u16 ru_start; + u16 ru_tones; + u32 mu_group[MAX_MU_GROUP_ID]; +}; + +struct ath12k_per_ppdu_tx_stats { + u16 succ_pkts; + u16 failed_pkts; + u16 retry_pkts; + u32 succ_bytes; + u32 failed_bytes; + u32 retry_bytes; +}; + +struct ath12k_wbm_tx_stats { + u64 wbm_tx_comp_stats[HAL_WBM_REL_HTT_TX_COMP_STATUS_MAX]; +}; + +struct ath12k_sta { + struct ath12k_vif 
*arvif; + + /* the following are protected by ar->data_lock */ + u32 changed; /* IEEE80211_RC_* */ + u32 bw; + u32 nss; + u32 smps; + enum hal_pn_type pn_type; + + struct work_struct update_wk; + struct rate_info txrate; + struct rate_info last_txrate; + u64 rx_duration; + u64 tx_duration; + u8 rssi_comb; + struct ath12k_rx_peer_stats *rx_stats; + struct ath12k_wbm_tx_stats *wbm_tx_stats; +}; + +#define ATH12K_MIN_5G_FREQ 4150 +#define ATH12K_MIN_6G_FREQ 5945 +#define ATH12K_MAX_6G_FREQ 7115 +#define ATH12K_NUM_CHANS 100 +#define ATH12K_MAX_5G_CHAN 173 + +enum ath12k_state { + ATH12K_STATE_OFF, + ATH12K_STATE_ON, + ATH12K_STATE_RESTARTING, + ATH12K_STATE_RESTARTED, + ATH12K_STATE_WEDGED, + /* Add other states as required */ +}; + +/* Antenna noise floor */ +#define ATH12K_DEFAULT_NOISE_FLOOR -95 + +struct ath12k_fw_stats { + u32 pdev_id; + u32 stats_id; + struct list_head pdevs; + struct list_head vdevs; + struct list_head bcn; +}; + +struct ath12k_per_peer_tx_stats { + u32 succ_bytes; + u32 retry_bytes; + u32 failed_bytes; + u32 duration; + u16 succ_pkts; + u16 retry_pkts; + u16 failed_pkts; + u16 ru_start; + u16 ru_tones; + u8 ba_fails; + u8 ppdu_type; + u32 mu_grpid; + u32 mu_pos; + bool is_ampdu; +}; + +#define ATH12K_FLUSH_TIMEOUT (5 * HZ) +#define ATH12K_VDEV_DELETE_TIMEOUT_HZ (5 * HZ) + +struct ath12k { + struct ath12k_base *ab; + struct ath12k_pdev *pdev; + struct ieee80211_hw *hw; + struct ieee80211_ops *ops; + struct ath12k_wmi_pdev *wmi; + struct ath12k_pdev_dp dp; + u8 mac_addr[ETH_ALEN]; + u32 ht_cap_info; + u32 vht_cap_info; + struct ath12k_he ar_he; + enum ath12k_state state; + bool supports_6ghz; + struct { + struct completion started; + struct completion completed; + struct completion on_channel; + struct delayed_work timeout; + enum ath12k_scan_state state; + bool is_roc; + int vdev_id; + int roc_freq; + bool roc_notify; + } scan; + + struct { + struct ieee80211_supported_band sbands[NUM_NL80211_BANDS]; + struct ieee80211_sband_iftype_data + iftype[NUM_NL80211_BANDS][NUM_NL80211_IFTYPES]; + } mac; + + unsigned long dev_flags; + unsigned int filter_flags; + unsigned long monitor_flags; + u32 min_tx_power; + u32 max_tx_power; + u32 txpower_limit_2g; + u32 txpower_limit_5g; + u32 txpower_scale; + u32 power_scale; + u32 chan_tx_pwr; + u32 num_stations; + u32 max_num_stations; + bool monitor_present; + /* To synchronize concurrent synchronous mac80211 callback operations, + * concurrent debugfs configuration and concurrent FW statistics events. + */ + struct mutex conf_mutex; + /* protects the radio specific data like debug stats, ppdu_stats_info stats, + * vdev_stop_status info, scan data, ath12k_sta info, ath12k_vif info, + * channel context data, survey info, test mode data. 
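+	 *
+	 * Unlike conf_mutex this is a spinlock, so it can be taken from
+	 * atomic contexts such as completion paths, but nothing that
+	 * sleeps may run while it is held.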
+ */ + spinlock_t data_lock; + + struct list_head arvifs; + /* should never be NULL; needed for regular htt rx */ + struct ieee80211_channel *rx_channel; + + /* valid during scan; needed for mgmt rx during scan */ + struct ieee80211_channel *scan_channel; + + u8 cfg_tx_chainmask; + u8 cfg_rx_chainmask; + u8 num_rx_chains; + u8 num_tx_chains; + /* pdev_idx starts from 0 whereas pdev->pdev_id starts with 1 */ + u8 pdev_idx; + u8 lmac_id; + + struct completion peer_assoc_done; + struct completion peer_delete_done; + + int install_key_status; + struct completion install_key_done; + + int last_wmi_vdev_start_status; + struct completion vdev_setup_done; + struct completion vdev_delete_done; + + int num_peers; + int max_num_peers; + u32 num_started_vdevs; + u32 num_created_vdevs; + unsigned long long allocated_vdev_map; + + struct idr txmgmt_idr; + /* protects txmgmt_idr data */ + spinlock_t txmgmt_idr_lock; + atomic_t num_pending_mgmt_tx; + + /* cycle count is reported twice for each visited channel during scan. + * access protected by data_lock + */ + u32 survey_last_rx_clear_count; + u32 survey_last_cycle_count; + + /* Channel info events are expected to come in pairs without and with + * COMPLETE flag set respectively for each channel visit during scan. + * + * However there are deviations from this rule. This flag is used to + * avoid reporting garbage data. + */ + bool ch_info_can_report_survey; + struct survey_info survey[ATH12K_NUM_CHANS]; + struct completion bss_survey_done; + + struct work_struct regd_update_work; + + struct work_struct wmi_mgmt_tx_work; + struct sk_buff_head wmi_mgmt_tx_queue; + + struct ath12k_per_peer_tx_stats peer_tx_stats; + struct list_head ppdu_stats_info; + u32 ppdu_stat_list_depth; + + struct ath12k_per_peer_tx_stats cached_stats; + u32 last_ppdu_id; + u32 cached_ppdu_id; + + bool dfs_block_radar_events; + bool monitor_conf_enabled; + bool monitor_vdev_created; + bool monitor_started; + int monitor_vdev_id; +}; + +struct ath12k_band_cap { + u32 phy_id; + u32 max_bw_supported; + u32 ht_cap_info; + u32 he_cap_info[2]; + u32 he_mcs; + u32 he_cap_phy_info[PSOC_HOST_MAX_PHY_SIZE]; + struct ath12k_wmi_ppe_threshold_arg he_ppet; + u16 he_6ghz_capa; +}; + +struct ath12k_pdev_cap { + u32 supported_bands; + u32 ampdu_density; + u32 vht_cap; + u32 vht_mcs; + u32 he_mcs; + u32 tx_chain_mask; + u32 rx_chain_mask; + u32 tx_chain_mask_shift; + u32 rx_chain_mask_shift; + struct ath12k_band_cap band[NUM_NL80211_BANDS]; +}; + +struct mlo_timestamp { + u32 info; + u32 sync_timestamp_lo_us; + u32 sync_timestamp_hi_us; + u32 mlo_offset_lo; + u32 mlo_offset_hi; + u32 mlo_offset_clks; + u32 mlo_comp_clks; + u32 mlo_comp_timer; +}; + +struct ath12k_pdev { + struct ath12k *ar; + u32 pdev_id; + struct ath12k_pdev_cap cap; + u8 mac_addr[ETH_ALEN]; + struct mlo_timestamp timestamp; +}; + +struct ath12k_board_data { + const struct firmware *fw; + const void *data; + size_t len; +}; + +struct ath12k_soc_dp_tx_err_stats { + /* TCL Ring Descriptor unavailable */ + u32 desc_na[DP_TCL_NUM_RING_MAX]; + /* Other failures during dp_tx due to mem allocation failure + * idr unavailable etc. 
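+	 *
+	 * Kept as an atomic_t so the hot transmit path can bump it
+	 * without taking any lock.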
+ */ + atomic_t misc_fail; +}; + +struct ath12k_soc_dp_stats { + u32 err_ring_pkts; + u32 invalid_rbm; + u32 rxdma_error[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX]; + u32 reo_error[HAL_REO_DEST_RING_ERROR_CODE_MAX]; + u32 hal_reo_error[DP_REO_DST_RING_MAX]; + struct ath12k_soc_dp_tx_err_stats tx_err; +}; + +/* Master structure to hold the hw data which may be used in core module */ +struct ath12k_base { + enum ath12k_hw_rev hw_rev; + struct platform_device *pdev; + struct device *dev; + struct ath12k_qmi qmi; + struct ath12k_wmi_base wmi_ab; + struct completion fw_ready; + int num_radios; + /* HW channel counters frequency value in hertz common to all MACs */ + u32 cc_freq_hz; + + struct ath12k_htc htc; + + struct ath12k_dp dp; + + void __iomem *mem; + unsigned long mem_len; + + struct { + enum ath12k_bus bus; + const struct ath12k_hif_ops *ops; + } hif; + + struct ath12k_ce ce; + struct timer_list rx_replenish_retry; + struct ath12k_hal hal; + /* To synchronize core_start/core_stop */ + struct mutex core_lock; + /* Protects data like peers */ + spinlock_t base_lock; + struct ath12k_pdev pdevs[MAX_RADIOS]; + struct ath12k_pdev __rcu *pdevs_active[MAX_RADIOS]; + struct ath12k_wmi_hal_reg_capabilities_ext_arg hal_reg_cap[MAX_RADIOS]; + unsigned long long free_vdev_map; + unsigned long long free_vdev_stats_id_map; + struct list_head peers; + wait_queue_head_t peer_mapping_wq; + u8 mac_addr[ETH_ALEN]; + bool wmi_ready; + u32 wlan_init_status; + int irq_num[ATH12K_IRQ_NUM_MAX]; + struct ath12k_ext_irq_grp ext_irq_grp[ATH12K_EXT_IRQ_GRP_NUM_MAX]; + struct napi_struct *napi; + struct ath12k_wmi_target_cap_arg target_caps; + u32 ext_service_bitmap[WMI_SERVICE_EXT_BM_SIZE]; + bool pdevs_macaddr_valid; + int bd_api; + + const struct ath12k_hw_params *hw_params; + + const struct firmware *cal_file; + + /* Below regd's are protected by ab->data_lock */ + /* This is the regd set for every radio + * by the firmware during initializatin + */ + struct ieee80211_regdomain *default_regd[MAX_RADIOS]; + /* This regd is set during dynamic country setting + * This may or may not be used during the runtime + */ + struct ieee80211_regdomain *new_regd[MAX_RADIOS]; + + /* Current DFS Regulatory */ + enum ath12k_dfs_region dfs_region; + struct ath12k_soc_dp_stats soc_stats; + + unsigned long dev_flags; + struct completion driver_recovery; + struct workqueue_struct *workqueue; + struct work_struct restart_work; + struct workqueue_struct *workqueue_aux; + struct work_struct reset_work; + atomic_t reset_count; + atomic_t recovery_count; + atomic_t recovery_start_count; + bool is_reset; + struct completion reset_complete; + struct completion reconfigure_complete; + struct completion recovery_start; + /* continuous recovery fail count */ + atomic_t fail_cont_count; + unsigned long reset_fail_timeout; + struct { + /* protected by data_lock */ + u32 fw_crash_counter; + } stats; + u32 pktlog_defs_checksum; + + struct ath12k_dbring_cap *db_caps; + u32 num_db_cap; + + struct timer_list mon_reap_timer; + + struct completion htc_suspend; + + u64 fw_soc_drop_count; + bool static_window_map; + + /* must be last */ + u8 drv_priv[] __aligned(sizeof(void *)); +}; + +int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab); +int ath12k_core_pre_init(struct ath12k_base *ab); +int ath12k_core_init(struct ath12k_base *ath12k); +void ath12k_core_deinit(struct ath12k_base *ath12k); +struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size, + enum ath12k_bus bus); +void ath12k_core_free(struct ath12k_base *ath12k); +int 
ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab, + struct ath12k_board_data *bd, + char *filename); +int ath12k_core_fetch_bdf(struct ath12k_base *ath12k, + struct ath12k_board_data *bd); +void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd); +int ath12k_core_check_dt(struct ath12k_base *ath12k); + +void ath12k_core_halt(struct ath12k *ar); +int ath12k_core_resume(struct ath12k_base *ab); +int ath12k_core_suspend(struct ath12k_base *ab); + +const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab, + const char *filename); + +static inline const char *ath12k_scan_state_str(enum ath12k_scan_state state) +{ + switch (state) { + case ATH12K_SCAN_IDLE: + return "idle"; + case ATH12K_SCAN_STARTING: + return "starting"; + case ATH12K_SCAN_RUNNING: + return "running"; + case ATH12K_SCAN_ABORTING: + return "aborting"; + } + + return "unknown"; +} + +static inline struct ath12k_skb_cb *ATH12K_SKB_CB(struct sk_buff *skb) +{ + BUILD_BUG_ON(sizeof(struct ath12k_skb_cb) > + IEEE80211_TX_INFO_DRIVER_DATA_SIZE); + return (struct ath12k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data; +} + +static inline struct ath12k_skb_rxcb *ATH12K_SKB_RXCB(struct sk_buff *skb) +{ + BUILD_BUG_ON(sizeof(struct ath12k_skb_rxcb) > sizeof(skb->cb)); + return (struct ath12k_skb_rxcb *)skb->cb; +} + +static inline struct ath12k_vif *ath12k_vif_to_arvif(struct ieee80211_vif *vif) +{ + return (struct ath12k_vif *)vif->drv_priv; +} + +static inline struct ath12k *ath12k_ab_to_ar(struct ath12k_base *ab, + int mac_id) +{ + return ab->pdevs[ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id)].ar; +} + +static inline void ath12k_core_create_firmware_path(struct ath12k_base *ab, + const char *filename, + void *buf, size_t buf_len) +{ + snprintf(buf, buf_len, "%s/%s/%s", ATH12K_FW_DIR, + ab->hw_params->fw.dir, filename); +} + +static inline const char *ath12k_bus_str(enum ath12k_bus bus) +{ + switch (bus) { + case ATH12K_BUS_PCI: + return "pci"; + } + + return "unknown"; +} + +#endif /* _CORE_H_ */ diff --git a/drivers/net/wireless/ath/ath12k/dbring.c b/drivers/net/wireless/ath/ath12k/dbring.c new file mode 100644 index 000000000000..8fbf868e6f7e --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/dbring.c @@ -0,0 +1,357 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include "core.h" +#include "debug.h" + +static int ath12k_dbring_bufs_replenish(struct ath12k *ar, + struct ath12k_dbring *ring, + struct ath12k_dbring_element *buff, + gfp_t gfp) +{ + struct ath12k_base *ab = ar->ab; + struct hal_srng *srng; + dma_addr_t paddr; + void *ptr_aligned, *ptr_unaligned, *desc; + int ret; + int buf_id; + u32 cookie; + + srng = &ab->hal.srng_list[ring->refill_srng.ring_id]; + + lockdep_assert_held(&srng->lock); + + ath12k_hal_srng_access_begin(ab, srng); + + ptr_unaligned = buff->payload; + ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align); + paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz, + DMA_FROM_DEVICE); + + ret = dma_mapping_error(ab->dev, paddr); + if (ret) + goto err; + + spin_lock_bh(&ring->idr_lock); + buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, gfp); + spin_unlock_bh(&ring->idr_lock); + if (buf_id < 0) { + ret = -ENOBUFS; + goto err_dma_unmap; + } + + desc = ath12k_hal_srng_src_get_next_entry(ab, srng); + if (!desc) { + ret = -ENOENT; + goto err_idr_remove; + } + + buff->paddr = paddr; + + cookie = u32_encode_bits(ar->pdev_idx, DP_RXDMA_BUF_COOKIE_PDEV_ID) | + u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID); + + ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0); + + ath12k_hal_srng_access_end(ab, srng); + + return 0; + +err_idr_remove: + spin_lock_bh(&ring->idr_lock); + idr_remove(&ring->bufs_idr, buf_id); + spin_unlock_bh(&ring->idr_lock); +err_dma_unmap: + dma_unmap_single(ab->dev, paddr, ring->buf_sz, + DMA_FROM_DEVICE); +err: + ath12k_hal_srng_access_end(ab, srng); + return ret; +} + +static int ath12k_dbring_fill_bufs(struct ath12k *ar, + struct ath12k_dbring *ring, + gfp_t gfp) +{ + struct ath12k_dbring_element *buff; + struct hal_srng *srng; + struct ath12k_base *ab = ar->ab; + int num_remain, req_entries, num_free; + u32 align; + int size, ret; + + srng = &ab->hal.srng_list[ring->refill_srng.ring_id]; + + spin_lock_bh(&srng->lock); + + num_free = ath12k_hal_srng_src_num_free(ab, srng, true); + req_entries = min(num_free, ring->bufs_max); + num_remain = req_entries; + align = ring->buf_align; + size = sizeof(*buff) + ring->buf_sz + align - 1; + + while (num_remain > 0) { + buff = kzalloc(size, gfp); + if (!buff) + break; + + ret = ath12k_dbring_bufs_replenish(ar, ring, buff, gfp); + if (ret) { + ath12k_warn(ab, "failed to replenish db ring num_remain %d req_ent %d\n", + num_remain, req_entries); + kfree(buff); + break; + } + num_remain--; + } + + spin_unlock_bh(&srng->lock); + + return num_remain; +} + +int ath12k_dbring_wmi_cfg_setup(struct ath12k *ar, + struct ath12k_dbring *ring, + enum wmi_direct_buffer_module id) +{ + struct ath12k_wmi_pdev_dma_ring_cfg_arg arg = {0}; + int ret; + + if (id >= WMI_DIRECT_BUF_MAX) + return -EINVAL; + + arg.pdev_id = DP_SW2HW_MACID(ring->pdev_id); + arg.module_id = id; + arg.base_paddr_lo = lower_32_bits(ring->refill_srng.paddr); + arg.base_paddr_hi = upper_32_bits(ring->refill_srng.paddr); + arg.head_idx_paddr_lo = lower_32_bits(ring->hp_addr); + arg.head_idx_paddr_hi = upper_32_bits(ring->hp_addr); + arg.tail_idx_paddr_lo = lower_32_bits(ring->tp_addr); + arg.tail_idx_paddr_hi = upper_32_bits(ring->tp_addr); + arg.num_elems = ring->bufs_max; + arg.buf_size = ring->buf_sz; + arg.num_resp_per_event = ring->num_resp_per_event; + arg.event_timeout_ms = ring->event_timeout_ms; + + ret = ath12k_wmi_pdev_dma_ring_cfg(ar, &arg); + if (ret) { + ath12k_warn(ar->ab, "failed to setup db ring cfg\n"); + return ret; + } + + return 0; +} + +int 
ath12k_dbring_set_cfg(struct ath12k *ar, struct ath12k_dbring *ring, + u32 num_resp_per_event, u32 event_timeout_ms, + int (*handler)(struct ath12k *, + struct ath12k_dbring_data *)) +{ + if (WARN_ON(!ring)) + return -EINVAL; + + ring->num_resp_per_event = num_resp_per_event; + ring->event_timeout_ms = event_timeout_ms; + ring->handler = handler; + + return 0; +} + +int ath12k_dbring_buf_setup(struct ath12k *ar, + struct ath12k_dbring *ring, + struct ath12k_dbring_cap *db_cap) +{ + struct ath12k_base *ab = ar->ab; + struct hal_srng *srng; + int ret; + + srng = &ab->hal.srng_list[ring->refill_srng.ring_id]; + ring->bufs_max = ring->refill_srng.size / + ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_DIR_BUF); + + ring->buf_sz = db_cap->min_buf_sz; + ring->buf_align = db_cap->min_buf_align; + ring->pdev_id = db_cap->pdev_id; + ring->hp_addr = ath12k_hal_srng_get_hp_addr(ab, srng); + ring->tp_addr = ath12k_hal_srng_get_tp_addr(ab, srng); + + ret = ath12k_dbring_fill_bufs(ar, ring, GFP_KERNEL); + + return ret; +} + +int ath12k_dbring_srng_setup(struct ath12k *ar, struct ath12k_dbring *ring, + int ring_num, int num_entries) +{ + int ret; + + ret = ath12k_dp_srng_setup(ar->ab, &ring->refill_srng, HAL_RXDMA_DIR_BUF, + ring_num, ar->pdev_idx, num_entries); + if (ret < 0) { + ath12k_warn(ar->ab, "failed to setup srng: %d ring_id %d\n", + ret, ring_num); + goto err; + } + + return 0; +err: + ath12k_dp_srng_cleanup(ar->ab, &ring->refill_srng); + return ret; +} + +int ath12k_dbring_get_cap(struct ath12k_base *ab, + u8 pdev_idx, + enum wmi_direct_buffer_module id, + struct ath12k_dbring_cap *db_cap) +{ + int i; + + if (!ab->num_db_cap || !ab->db_caps) + return -ENOENT; + + if (id >= WMI_DIRECT_BUF_MAX) + return -EINVAL; + + for (i = 0; i < ab->num_db_cap; i++) { + if (pdev_idx == ab->db_caps[i].pdev_id && + id == ab->db_caps[i].id) { + *db_cap = ab->db_caps[i]; + + return 0; + } + } + + return -ENOENT; +} + +int ath12k_dbring_buffer_release_event(struct ath12k_base *ab, + struct ath12k_dbring_buf_release_event *ev) +{ + struct ath12k_dbring *ring = NULL; + struct hal_srng *srng; + struct ath12k *ar; + struct ath12k_dbring_element *buff; + struct ath12k_dbring_data handler_data; + struct ath12k_buffer_addr desc; + u8 *vaddr_unalign; + u32 num_entry, num_buff_reaped; + u8 pdev_idx, rbm; + u32 cookie; + int buf_id; + int size; + dma_addr_t paddr; + int ret = 0; + + pdev_idx = le32_to_cpu(ev->fixed.pdev_id); + + if (pdev_idx >= ab->num_radios) { + ath12k_warn(ab, "Invalid pdev id %d\n", pdev_idx); + return -EINVAL; + } + + if (ev->fixed.num_buf_release_entry != + ev->fixed.num_meta_data_entry) { + ath12k_warn(ab, "Buffer entry %d mismatch meta entry %d\n", + ev->fixed.num_buf_release_entry, + ev->fixed.num_meta_data_entry); + return -EINVAL; + } + + ar = ab->pdevs[pdev_idx].ar; + + rcu_read_lock(); + if (!rcu_dereference(ab->pdevs_active[pdev_idx])) { + ret = -EINVAL; + goto rcu_unlock; + } + + switch (ev->fixed.module_id) { + case WMI_DIRECT_BUF_SPECTRAL: + break; + default: + ring = NULL; + ath12k_warn(ab, "Recv dma buffer release ev on unsupp module %d\n", + ev->fixed.module_id); + break; + } + + if (!ring) { + ret = -EINVAL; + goto rcu_unlock; + } + + srng = &ab->hal.srng_list[ring->refill_srng.ring_id]; + num_entry = le32_to_cpu(ev->fixed.num_buf_release_entry); + size = sizeof(*buff) + ring->buf_sz + ring->buf_align - 1; + num_buff_reaped = 0; + + spin_lock_bh(&srng->lock); + + while (num_buff_reaped < num_entry) { + desc.info0 = ev->buf_entry[num_buff_reaped].paddr_lo; + desc.info1 = 
ev->buf_entry[num_buff_reaped].paddr_hi; + handler_data.meta = ev->meta_data[num_buff_reaped]; + + num_buff_reaped++; + + ath12k_hal_rx_buf_addr_info_get(&desc, &paddr, &cookie, &rbm); + + buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID); + + spin_lock_bh(&ring->idr_lock); + buff = idr_find(&ring->bufs_idr, buf_id); + if (!buff) { + spin_unlock_bh(&ring->idr_lock); + continue; + } + idr_remove(&ring->bufs_idr, buf_id); + spin_unlock_bh(&ring->idr_lock); + + dma_unmap_single(ab->dev, buff->paddr, ring->buf_sz, + DMA_FROM_DEVICE); + + if (ring->handler) { + vaddr_unalign = buff->payload; + handler_data.data = PTR_ALIGN(vaddr_unalign, + ring->buf_align); + handler_data.data_sz = ring->buf_sz; + + ring->handler(ar, &handler_data); + } + + memset(buff, 0, size); + ath12k_dbring_bufs_replenish(ar, ring, buff, GFP_ATOMIC); + } + + spin_unlock_bh(&srng->lock); + +rcu_unlock: + rcu_read_unlock(); + + return ret; +} + +void ath12k_dbring_srng_cleanup(struct ath12k *ar, struct ath12k_dbring *ring) +{ + ath12k_dp_srng_cleanup(ar->ab, &ring->refill_srng); +} + +void ath12k_dbring_buf_cleanup(struct ath12k *ar, struct ath12k_dbring *ring) +{ + struct ath12k_dbring_element *buff; + int buf_id; + + spin_lock_bh(&ring->idr_lock); + idr_for_each_entry(&ring->bufs_idr, buff, buf_id) { + idr_remove(&ring->bufs_idr, buf_id); + dma_unmap_single(ar->ab->dev, buff->paddr, + ring->buf_sz, DMA_FROM_DEVICE); + kfree(buff); + } + + idr_destroy(&ring->bufs_idr); + spin_unlock_bh(&ring->idr_lock); +} diff --git a/drivers/net/wireless/ath/ath12k/dbring.h b/drivers/net/wireless/ath/ath12k/dbring.h new file mode 100644 index 000000000000..e1c0eba774ec --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/dbring.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef ATH12K_DBRING_H +#define ATH12K_DBRING_H + +#include <linux/types.h> +#include <linux/idr.h> +#include <linux/spinlock.h> +#include "dp.h" + +struct ath12k_dbring_element { + dma_addr_t paddr; + u8 payload[]; +}; + +struct ath12k_dbring_data { + void *data; + u32 data_sz; + struct ath12k_wmi_dma_buf_release_meta_data_params meta; +}; + +struct ath12k_dbring_buf_release_event { + struct ath12k_wmi_dma_buf_release_fixed_params fixed; + const struct ath12k_wmi_dma_buf_release_entry_params *buf_entry; + const struct ath12k_wmi_dma_buf_release_meta_data_params *meta_data; + u32 num_buf_entry; + u32 num_meta; +}; + +struct ath12k_dbring_cap { + u32 pdev_id; + enum wmi_direct_buffer_module id; + u32 min_elem; + u32 min_buf_sz; + u32 min_buf_align; +}; + +struct ath12k_dbring { + struct dp_srng refill_srng; + struct idr bufs_idr; + /* Protects bufs_idr */ + spinlock_t idr_lock; + dma_addr_t tp_addr; + dma_addr_t hp_addr; + int bufs_max; + u32 pdev_id; + u32 buf_sz; + u32 buf_align; + u32 num_resp_per_event; + u32 event_timeout_ms; + int (*handler)(struct ath12k *ar, struct ath12k_dbring_data *data); +}; + +int ath12k_dbring_set_cfg(struct ath12k *ar, + struct ath12k_dbring *ring, + u32 num_resp_per_event, + u32 event_timeout_ms, + int (*handler)(struct ath12k *, + struct ath12k_dbring_data *)); +int ath12k_dbring_wmi_cfg_setup(struct ath12k *ar, + struct ath12k_dbring *ring, + enum wmi_direct_buffer_module id); +int ath12k_dbring_buf_setup(struct ath12k *ar, + struct ath12k_dbring *ring, + struct ath12k_dbring_cap *db_cap); +int ath12k_dbring_srng_setup(struct ath12k *ar, struct ath12k_dbring *ring, + int ring_num, int num_entries); +int ath12k_dbring_buffer_release_event(struct ath12k_base *ab, + struct ath12k_dbring_buf_release_event *ev); +int ath12k_dbring_get_cap(struct ath12k_base *ab, + u8 pdev_idx, + enum wmi_direct_buffer_module id, + struct ath12k_dbring_cap *db_cap); +void ath12k_dbring_srng_cleanup(struct ath12k *ar, struct ath12k_dbring *ring); +void ath12k_dbring_buf_cleanup(struct ath12k *ar, struct ath12k_dbring *ring); +#endif /* ATH12K_DBRING_H */ diff --git a/drivers/net/wireless/ath/ath12k/debug.c b/drivers/net/wireless/ath/ath12k/debug.c new file mode 100644 index 000000000000..67893923e010 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/debug.c @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include <linux/vmalloc.h> +#include "core.h" +#include "debug.h" + +void ath12k_info(struct ath12k_base *ab, const char *fmt, ...) +{ + struct va_format vaf = { + .fmt = fmt, + }; + va_list args; + + va_start(args, fmt); + vaf.va = &args; + dev_info(ab->dev, "%pV", &vaf); + /* TODO: Trace the log */ + va_end(args); +} + +void ath12k_err(struct ath12k_base *ab, const char *fmt, ...) +{ + struct va_format vaf = { + .fmt = fmt, + }; + va_list args; + + va_start(args, fmt); + vaf.va = &args; + dev_err(ab->dev, "%pV", &vaf); + /* TODO: Trace the log */ + va_end(args); +} + +void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...) +{ + struct va_format vaf = { + .fmt = fmt, + }; + va_list args; + + va_start(args, fmt); + vaf.va = &args; + dev_warn_ratelimited(ab->dev, "%pV", &vaf); + /* TODO: Trace the log */ + va_end(args); +} + +#ifdef CONFIG_ATH12K_DEBUG + +void __ath12k_dbg(struct ath12k_base *ab, enum ath12k_debug_mask mask, + const char *fmt, ...) 
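+/* With CONFIG_ATH12K_DEBUG enabled, the message below is emitted only when + * the matching bit is set in the module-wide ath12k_debug_mask; without the + * option these debug helpers compile away to empty stubs (see debug.h). + */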
+{ + struct va_format vaf; + va_list args; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + if (ath12k_debug_mask & mask) + dev_dbg(ab->dev, "%pV", &vaf); + + /* TODO: trace log */ + + va_end(args); +} + +void ath12k_dbg_dump(struct ath12k_base *ab, + enum ath12k_debug_mask mask, + const char *msg, const char *prefix, + const void *buf, size_t len) +{ + char linebuf[256]; + size_t linebuflen; + const void *ptr; + + if (ath12k_debug_mask & mask) { + if (msg) + __ath12k_dbg(ab, mask, "%s\n", msg); + + for (ptr = buf; (ptr - buf) < len; ptr += 16) { + linebuflen = 0; + linebuflen += scnprintf(linebuf + linebuflen, + sizeof(linebuf) - linebuflen, + "%s%08x: ", + (prefix ? prefix : ""), + (unsigned int)(ptr - buf)); + hex_dump_to_buffer(ptr, len - (ptr - buf), 16, 1, + linebuf + linebuflen, + sizeof(linebuf) - linebuflen, true); + dev_dbg(ab->dev, "%s\n", linebuf); + } + } +} + +#endif /* CONFIG_ATH12K_DEBUG */ diff --git a/drivers/net/wireless/ath/ath12k/debug.h b/drivers/net/wireless/ath/ath12k/debug.h new file mode 100644 index 000000000000..aa685295f8a4 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/debug.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _ATH12K_DEBUG_H_ +#define _ATH12K_DEBUG_H_ + +#include "trace.h" + +enum ath12k_debug_mask { + ATH12K_DBG_AHB = 0x00000001, + ATH12K_DBG_WMI = 0x00000002, + ATH12K_DBG_HTC = 0x00000004, + ATH12K_DBG_DP_HTT = 0x00000008, + ATH12K_DBG_MAC = 0x00000010, + ATH12K_DBG_BOOT = 0x00000020, + ATH12K_DBG_QMI = 0x00000040, + ATH12K_DBG_DATA = 0x00000080, + ATH12K_DBG_MGMT = 0x00000100, + ATH12K_DBG_REG = 0x00000200, + ATH12K_DBG_TESTMODE = 0x00000400, + ATH12K_DBG_HAL = 0x00000800, + ATH12K_DBG_PCI = 0x00001000, + ATH12K_DBG_DP_TX = 0x00002000, + ATH12K_DBG_DP_RX = 0x00004000, + ATH12K_DBG_ANY = 0xffffffff, +}; + +__printf(2, 3) void ath12k_info(struct ath12k_base *ab, const char *fmt, ...); +__printf(2, 3) void ath12k_err(struct ath12k_base *ab, const char *fmt, ...); +__printf(2, 3) void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...); + +extern unsigned int ath12k_debug_mask; + +#ifdef CONFIG_ATH12K_DEBUG +__printf(3, 4) void __ath12k_dbg(struct ath12k_base *ab, + enum ath12k_debug_mask mask, + const char *fmt, ...); +void ath12k_dbg_dump(struct ath12k_base *ab, + enum ath12k_debug_mask mask, + const char *msg, const char *prefix, + const void *buf, size_t len); +#else /* CONFIG_ATH12K_DEBUG */ +static inline void __ath12k_dbg(struct ath12k_base *ab, + enum ath12k_debug_mask dbg_mask, + const char *fmt, ...) +{ +} + +static inline void ath12k_dbg_dump(struct ath12k_base *ab, + enum ath12k_debug_mask mask, + const char *msg, const char *prefix, + const void *buf, size_t len) +{ +} +#endif /* CONFIG_ATH12K_DEBUG */ + +#define ath12k_dbg(ar, dbg_mask, fmt, ...) \ +do { \ + typeof(dbg_mask) mask = (dbg_mask); \ + if (ath12k_debug_mask & mask) \ + __ath12k_dbg(ar, mask, fmt, ##__VA_ARGS__); \ +} while (0) + +#endif /* _ATH12K_DEBUG_H_ */ diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c new file mode 100644 index 000000000000..eb0261500ab0 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/dp.c @@ -0,0 +1,1580 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. 
All rights reserved. + */ + +#include <crypto/hash.h> +#include "core.h" +#include "dp_tx.h" +#include "hal_tx.h" +#include "hif.h" +#include "debug.h" +#include "dp_rx.h" +#include "peer.h" +#include "dp_mon.h" + +static void ath12k_dp_htt_htc_tx_complete(struct ath12k_base *ab, + struct sk_buff *skb) +{ + dev_kfree_skb_any(skb); +} + +void ath12k_dp_peer_cleanup(struct ath12k *ar, int vdev_id, const u8 *addr) +{ + struct ath12k_base *ab = ar->ab; + struct ath12k_peer *peer; + + /* TODO: Any other peer specific DP cleanup */ + + spin_lock_bh(&ab->base_lock); + peer = ath12k_peer_find(ab, vdev_id, addr); + if (!peer) { + ath12k_warn(ab, "failed to lookup peer %pM on vdev %d\n", + addr, vdev_id); + spin_unlock_bh(&ab->base_lock); + return; + } + + ath12k_dp_rx_peer_tid_cleanup(ar, peer); + crypto_free_shash(peer->tfm_mmic); + spin_unlock_bh(&ab->base_lock); +} + +int ath12k_dp_peer_setup(struct ath12k *ar, int vdev_id, const u8 *addr) +{ + struct ath12k_base *ab = ar->ab; + struct ath12k_peer *peer; + u32 reo_dest; + int ret = 0, tid; + + /* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */ + reo_dest = ar->dp.mac_id + 1; + ret = ath12k_wmi_set_peer_param(ar, addr, vdev_id, + WMI_PEER_SET_DEFAULT_ROUTING, + DP_RX_HASH_ENABLE | (reo_dest << 1)); + + if (ret) { + ath12k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n", + ret, addr, vdev_id); + return ret; + } + + for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) { + ret = ath12k_dp_rx_peer_tid_setup(ar, addr, vdev_id, tid, 1, 0, + HAL_PN_TYPE_NONE); + if (ret) { + ath12k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n", + tid, ret); + goto peer_clean; + } + } + + ret = ath12k_dp_rx_peer_frag_setup(ar, addr, vdev_id); + if (ret) { + ath12k_warn(ab, "failed to setup rx defrag context\n"); + goto peer_clean; + } + + /* TODO: Setup other peer specific resource used in data path */ + + return 0; + +peer_clean: + spin_lock_bh(&ab->base_lock); + + peer = ath12k_peer_find(ab, vdev_id, addr); + if (!peer) { + ath12k_warn(ab, "failed to find the peer to del rx tid\n"); + spin_unlock_bh(&ab->base_lock); + return -ENOENT; + } + + for (; tid >= 0; tid--) + ath12k_dp_rx_peer_tid_delete(ar, peer, tid); + + spin_unlock_bh(&ab->base_lock); + + return ret; +} + +void ath12k_dp_srng_cleanup(struct ath12k_base *ab, struct dp_srng *ring) +{ + if (!ring->vaddr_unaligned) + return; + + dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned, + ring->paddr_unaligned); + + ring->vaddr_unaligned = NULL; +} + +static int ath12k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask) +{ + int ext_group_num; + u8 mask = 1 << ring_num; + + for (ext_group_num = 0; ext_group_num < ATH12K_EXT_IRQ_GRP_NUM_MAX; + ext_group_num++) { + if (mask & grp_mask[ext_group_num]) + return ext_group_num; + } + + return -ENOENT; +} + +static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab, + enum hal_ring_type type, int ring_num) +{ + const u8 *grp_mask; + + switch (type) { + case HAL_WBM2SW_RELEASE: + if (ring_num == HAL_WBM2SW_REL_ERR_RING_NUM) { + grp_mask = &ab->hw_params->ring_mask->rx_wbm_rel[0]; + ring_num = 0; + } else { + grp_mask = &ab->hw_params->ring_mask->tx[0]; + } + break; + case HAL_REO_EXCEPTION: + grp_mask = &ab->hw_params->ring_mask->rx_err[0]; + break; + case HAL_REO_DST: + grp_mask = &ab->hw_params->ring_mask->rx[0]; + break; + case HAL_REO_STATUS: + grp_mask = &ab->hw_params->ring_mask->reo_status[0]; + break; + case HAL_RXDMA_MONITOR_STATUS: + case HAL_RXDMA_MONITOR_DST: + grp_mask = 
&ab->hw_params->ring_mask->rx_mon_dest[0]; + break; + case HAL_TX_MONITOR_DST: + grp_mask = &ab->hw_params->ring_mask->tx_mon_dest[0]; + break; + case HAL_RXDMA_BUF: + grp_mask = &ab->hw_params->ring_mask->host2rxdma[0]; + break; + case HAL_RXDMA_MONITOR_BUF: + case HAL_TCL_DATA: + case HAL_TCL_CMD: + case HAL_REO_CMD: + case HAL_SW2WBM_RELEASE: + case HAL_WBM_IDLE_LINK: + case HAL_TCL_STATUS: + case HAL_REO_REINJECT: + case HAL_CE_SRC: + case HAL_CE_DST: + case HAL_CE_DST_STATUS: + default: + return -ENOENT; + } + + return ath12k_dp_srng_find_ring_in_mask(ring_num, grp_mask); +} + +static void ath12k_dp_srng_msi_setup(struct ath12k_base *ab, + struct hal_srng_params *ring_params, + enum hal_ring_type type, int ring_num) +{ + int msi_group_number, msi_data_count; + u32 msi_data_start, msi_irq_start, addr_lo, addr_hi; + int ret; + + ret = ath12k_hif_get_user_msi_vector(ab, "DP", + &msi_data_count, &msi_data_start, + &msi_irq_start); + if (ret) + return; + + msi_group_number = ath12k_dp_srng_calculate_msi_group(ab, type, + ring_num); + if (msi_group_number < 0) { + ath12k_dbg(ab, ATH12K_DBG_PCI, + "ring not part of an ext_group; ring_type: %d, ring_num %d", + type, ring_num); + ring_params->msi_addr = 0; + ring_params->msi_data = 0; + return; + } + + if (msi_group_number > msi_data_count) { + ath12k_dbg(ab, ATH12K_DBG_PCI, + "multiple msi_groups share one msi, msi_group_num %d", + msi_group_number); + } + + ath12k_hif_get_msi_address(ab, &addr_lo, &addr_hi); + + ring_params->msi_addr = addr_lo; + ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32); + ring_params->msi_data = (msi_group_number % msi_data_count) + + msi_data_start; + ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR; +} + +int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring, + enum hal_ring_type type, int ring_num, + int mac_id, int num_entries) +{ + struct hal_srng_params params = { 0 }; + int entry_sz = ath12k_hal_srng_get_entrysize(ab, type); + int max_entries = ath12k_hal_srng_get_max_entries(ab, type); + int ret; + + if (max_entries < 0 || entry_sz < 0) + return -EINVAL; + + if (num_entries > max_entries) + num_entries = max_entries; + + ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1; + ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size, + &ring->paddr_unaligned, + GFP_KERNEL); + if (!ring->vaddr_unaligned) + return -ENOMEM; + + ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN); + ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr - + (unsigned long)ring->vaddr_unaligned); + + params.ring_base_vaddr = ring->vaddr; + params.ring_base_paddr = ring->paddr; + params.num_entries = num_entries; + ath12k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id); + + switch (type) { + case HAL_REO_DST: + params.intr_batch_cntr_thres_entries = + HAL_SRNG_INT_BATCH_THRESHOLD_RX; + params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX; + break; + case HAL_RXDMA_BUF: + case HAL_RXDMA_MONITOR_BUF: + case HAL_RXDMA_MONITOR_STATUS: + params.low_threshold = num_entries >> 3; + params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN; + params.intr_batch_cntr_thres_entries = 0; + params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX; + break; + case HAL_TX_MONITOR_DST: + params.low_threshold = DP_TX_MONITOR_BUF_SIZE_MAX >> 3; + params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN; + params.intr_batch_cntr_thres_entries = 0; + params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX; + break; + case HAL_WBM2SW_RELEASE: + if 
(ab->hw_params->hw_ops->dp_srng_is_tx_comp_ring(ring_num)) { + params.intr_batch_cntr_thres_entries = + HAL_SRNG_INT_BATCH_THRESHOLD_TX; + params.intr_timer_thres_us = + HAL_SRNG_INT_TIMER_THRESHOLD_TX; + break; + } + /* fall through when ring_num != HAL_WBM2SW_REL_ERR_RING_NUM */ + fallthrough; + case HAL_REO_EXCEPTION: + case HAL_REO_REINJECT: + case HAL_REO_CMD: + case HAL_REO_STATUS: + case HAL_TCL_DATA: + case HAL_TCL_CMD: + case HAL_TCL_STATUS: + case HAL_WBM_IDLE_LINK: + case HAL_SW2WBM_RELEASE: + case HAL_RXDMA_DST: + case HAL_RXDMA_MONITOR_DST: + case HAL_RXDMA_MONITOR_DESC: + params.intr_batch_cntr_thres_entries = + HAL_SRNG_INT_BATCH_THRESHOLD_OTHER; + params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER; + break; + case HAL_RXDMA_DIR_BUF: + break; + default: + ath12k_warn(ab, "Not a valid ring type in dp: %d\n", type); + return -EINVAL; + } + + ret = ath12k_hal_srng_setup(ab, type, ring_num, mac_id, &params); + if (ret < 0) { + ath12k_warn(ab, "failed to setup srng: %d ring_id %d\n", + ret, ring_num); + return ret; + } + + ring->ring_id = ret; + + return 0; +} + +static +u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab, struct ath12k_vif *arvif) +{ + u32 bank_config = 0; + + /* Only valid for raw frames with HW crypto enabled. + * With SW crypto, mac80211 sets key per packet + */ + if (arvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW && + test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags)) + bank_config |= + u32_encode_bits(ath12k_dp_tx_get_encrypt_type(arvif->key_cipher), + HAL_TX_BANK_CONFIG_ENCRYPT_TYPE); + + bank_config |= u32_encode_bits(arvif->tx_encap_type, + HAL_TX_BANK_CONFIG_ENCAP_TYPE); + bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_SRC_BUFFER_SWAP) | + u32_encode_bits(0, HAL_TX_BANK_CONFIG_LINK_META_SWAP) | + u32_encode_bits(0, HAL_TX_BANK_CONFIG_EPD); + + /* only valid if idx_lookup_override is not set in tcl_data_cmd */ + bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN); + + bank_config |= u32_encode_bits(arvif->hal_addr_search_flags & HAL_TX_ADDRX_EN, + HAL_TX_BANK_CONFIG_ADDRX_EN) | + u32_encode_bits(!!(arvif->hal_addr_search_flags & + HAL_TX_ADDRY_EN), + HAL_TX_BANK_CONFIG_ADDRY_EN); + + bank_config |= u32_encode_bits(ieee80211_vif_is_mesh(arvif->vif) ? 
3 : 0, + HAL_TX_BANK_CONFIG_MESH_EN) | + u32_encode_bits(arvif->vdev_id_check_en, + HAL_TX_BANK_CONFIG_VDEV_ID_CHECK_EN); + + bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_DSCP_TIP_MAP_ID); + + return bank_config; +} + +static int ath12k_dp_tx_get_bank_profile(struct ath12k_base *ab, struct ath12k_vif *arvif, + struct ath12k_dp *dp) +{ + int bank_id = DP_INVALID_BANK_ID; + int i; + u32 bank_config; + bool configure_register = false; + + /* convert vdev params into hal_tx_bank_config */ + bank_config = ath12k_dp_tx_get_vdev_bank_config(ab, arvif); + + spin_lock_bh(&dp->tx_bank_lock); + /* TODO: implement using idr kernel framework*/ + for (i = 0; i < dp->num_bank_profiles; i++) { + if (dp->bank_profiles[i].is_configured && + (dp->bank_profiles[i].bank_config ^ bank_config) == 0) { + bank_id = i; + goto inc_ref_and_return; + } + if (!dp->bank_profiles[i].is_configured || + !dp->bank_profiles[i].num_users) { + bank_id = i; + goto configure_and_return; + } + } + + if (bank_id == DP_INVALID_BANK_ID) { + spin_unlock_bh(&dp->tx_bank_lock); + ath12k_err(ab, "unable to find TX bank!"); + return bank_id; + } + +configure_and_return: + dp->bank_profiles[bank_id].is_configured = true; + dp->bank_profiles[bank_id].bank_config = bank_config; + configure_register = true; +inc_ref_and_return: + dp->bank_profiles[bank_id].num_users++; + spin_unlock_bh(&dp->tx_bank_lock); + + if (configure_register) + ath12k_hal_tx_configure_bank_register(ab, bank_config, bank_id); + + ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt tcl bank_id %d input 0x%x match 0x%x num_users %u", + bank_id, bank_config, dp->bank_profiles[bank_id].bank_config, + dp->bank_profiles[bank_id].num_users); + + return bank_id; +} + +void ath12k_dp_tx_put_bank_profile(struct ath12k_dp *dp, u8 bank_id) +{ + spin_lock_bh(&dp->tx_bank_lock); + dp->bank_profiles[bank_id].num_users--; + spin_unlock_bh(&dp->tx_bank_lock); +} + +static void ath12k_dp_deinit_bank_profiles(struct ath12k_base *ab) +{ + struct ath12k_dp *dp = &ab->dp; + + kfree(dp->bank_profiles); + dp->bank_profiles = NULL; +} + +static int ath12k_dp_init_bank_profiles(struct ath12k_base *ab) +{ + struct ath12k_dp *dp = &ab->dp; + u32 num_tcl_banks = ab->hw_params->num_tcl_banks; + int i; + + dp->num_bank_profiles = num_tcl_banks; + dp->bank_profiles = kmalloc_array(num_tcl_banks, + sizeof(struct ath12k_dp_tx_bank_profile), + GFP_KERNEL); + if (!dp->bank_profiles) + return -ENOMEM; + + spin_lock_init(&dp->tx_bank_lock); + + for (i = 0; i < num_tcl_banks; i++) { + dp->bank_profiles[i].is_configured = false; + dp->bank_profiles[i].num_users = 0; + } + + return 0; +} + +static void ath12k_dp_srng_common_cleanup(struct ath12k_base *ab) +{ + struct ath12k_dp *dp = &ab->dp; + int i; + + ath12k_dp_srng_cleanup(ab, &dp->reo_status_ring); + ath12k_dp_srng_cleanup(ab, &dp->reo_cmd_ring); + ath12k_dp_srng_cleanup(ab, &dp->reo_except_ring); + ath12k_dp_srng_cleanup(ab, &dp->rx_rel_ring); + ath12k_dp_srng_cleanup(ab, &dp->reo_reinject_ring); + for (i = 0; i < ab->hw_params->max_tx_ring; i++) { + ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring); + ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring); + } + ath12k_dp_srng_cleanup(ab, &dp->tcl_status_ring); + ath12k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring); + ath12k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring); +} + +static int ath12k_dp_srng_common_setup(struct ath12k_base *ab) +{ + struct ath12k_dp *dp = &ab->dp; + const struct ath12k_hal_tcl_to_wbm_rbm_map *map; + struct hal_srng *srng; + int i, ret, tx_comp_ring_num; + u32 ring_hash_map; 
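+ /* Bring up the common rings in dependency order: the SW2WBM release + * ring and the TCL command/status rings first, then one TCL data and + * WBM completion ring pair per hardware TX ring, and finally the WBM + * error release ring and the REO reinject/exception/command/status + * rings used on the RX side. + */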
+ + ret = ath12k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring, + HAL_SW2WBM_RELEASE, 0, 0, + DP_WBM_RELEASE_RING_SIZE); + if (ret) { + ath12k_warn(ab, "failed to set up wbm2sw_release ring :%d\n", + ret); + goto err; + } + + ret = ath12k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0, + DP_TCL_CMD_RING_SIZE); + if (ret) { + ath12k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret); + goto err; + } + + ret = ath12k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS, + 0, 0, DP_TCL_STATUS_RING_SIZE); + if (ret) { + ath12k_warn(ab, "failed to set up tcl_status ring :%d\n", ret); + goto err; + } + + for (i = 0; i < ab->hw_params->max_tx_ring; i++) { + map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map; + tx_comp_ring_num = map[i].wbm_ring_num; + + ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring, + HAL_TCL_DATA, i, 0, + DP_TCL_DATA_RING_SIZE); + if (ret) { + ath12k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n", + i, ret); + goto err; + } + + ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring, + HAL_WBM2SW_RELEASE, tx_comp_ring_num, 0, + DP_TX_COMP_RING_SIZE); + if (ret) { + ath12k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n", + tx_comp_ring_num, ret); + goto err; + } + } + + ret = ath12k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT, + 0, 0, DP_REO_REINJECT_RING_SIZE); + if (ret) { + ath12k_warn(ab, "failed to set up reo_reinject ring :%d\n", + ret); + goto err; + } + + ret = ath12k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE, + HAL_WBM2SW_REL_ERR_RING_NUM, 0, + DP_RX_RELEASE_RING_SIZE); + if (ret) { + ath12k_warn(ab, "failed to set up rx_rel ring :%d\n", ret); + goto err; + } + + ret = ath12k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION, + 0, 0, DP_REO_EXCEPTION_RING_SIZE); + if (ret) { + ath12k_warn(ab, "failed to set up reo_exception ring :%d\n", + ret); + goto err; + } + + ret = ath12k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD, + 0, 0, DP_REO_CMD_RING_SIZE); + if (ret) { + ath12k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret); + goto err; + } + + srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id]; + ath12k_hal_reo_init_cmd_ring(ab, srng); + + ret = ath12k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS, + 0, 0, DP_REO_STATUS_RING_SIZE); + if (ret) { + ath12k_warn(ab, "failed to set up reo_status ring :%d\n", ret); + goto err; + } + + /* When hash based routing of rx packet is enabled, 32 entries to map + * the hash values to the ring will be configured. Each hash entry uses + * four bits to map to a particular ring. The ring mapping will be + * 0:TCL, 1:SW1, 2:SW2, 3:SW3, 4:SW4, 5:Release, 6:FW and 7:SW5 + * 8:SW6, 9:SW7, 10:SW8, 11:Not used. 
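+ * For example, the ring_hash_map value built below packs SW1..SW4 into + * successive 4-bit slots twice over, so consecutive hash values are + * spread round-robin across the four host SW rings.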
+ */ + ring_hash_map = HAL_HASH_ROUTING_RING_SW1 | + HAL_HASH_ROUTING_RING_SW2 << 4 | + HAL_HASH_ROUTING_RING_SW3 << 8 | + HAL_HASH_ROUTING_RING_SW4 << 12 | + HAL_HASH_ROUTING_RING_SW1 << 16 | + HAL_HASH_ROUTING_RING_SW2 << 20 | + HAL_HASH_ROUTING_RING_SW3 << 24 | + HAL_HASH_ROUTING_RING_SW4 << 28; + + ath12k_hal_reo_hw_setup(ab, ring_hash_map); + + return 0; + +err: + ath12k_dp_srng_common_cleanup(ab); + + return ret; +} + +static void ath12k_dp_scatter_idle_link_desc_cleanup(struct ath12k_base *ab) +{ + struct ath12k_dp *dp = &ab->dp; + struct hal_wbm_idle_scatter_list *slist = dp->scatter_list; + int i; + + for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) { + if (!slist[i].vaddr) + continue; + + dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX, + slist[i].vaddr, slist[i].paddr); + slist[i].vaddr = NULL; + } +} + +static int ath12k_dp_scatter_idle_link_desc_setup(struct ath12k_base *ab, + int size, + u32 n_link_desc_bank, + u32 n_link_desc, + u32 last_bank_sz) +{ + struct ath12k_dp *dp = &ab->dp; + struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks; + struct hal_wbm_idle_scatter_list *slist = dp->scatter_list; + u32 n_entries_per_buf; + int num_scatter_buf, scatter_idx; + struct hal_wbm_link_desc *scatter_buf; + int align_bytes, n_entries; + dma_addr_t paddr; + int rem_entries; + int i; + int ret = 0; + u32 end_offset, cookie; + + n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE / + ath12k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK); + num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE); + + if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX) + return -EINVAL; + + for (i = 0; i < num_scatter_buf; i++) { + slist[i].vaddr = dma_alloc_coherent(ab->dev, + HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX, + &slist[i].paddr, GFP_KERNEL); + if (!slist[i].vaddr) { + ret = -ENOMEM; + goto err; + } + } + + scatter_idx = 0; + scatter_buf = slist[scatter_idx].vaddr; + rem_entries = n_entries_per_buf; + + for (i = 0; i < n_link_desc_bank; i++) { + align_bytes = link_desc_banks[i].vaddr - + link_desc_banks[i].vaddr_unaligned; + n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) / + HAL_LINK_DESC_SIZE; + paddr = link_desc_banks[i].paddr; + while (n_entries) { + cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i); + ath12k_hal_set_link_desc_addr(scatter_buf, cookie, paddr); + n_entries--; + paddr += HAL_LINK_DESC_SIZE; + if (rem_entries) { + rem_entries--; + scatter_buf++; + continue; + } + + rem_entries = n_entries_per_buf; + scatter_idx++; + scatter_buf = slist[scatter_idx].vaddr; + } + } + + end_offset = (scatter_buf - slist[scatter_idx].vaddr) * + sizeof(struct hal_wbm_link_desc); + ath12k_hal_setup_link_idle_list(ab, slist, num_scatter_buf, + n_link_desc, end_offset); + + return 0; + +err: + ath12k_dp_scatter_idle_link_desc_cleanup(ab); + + return ret; +} + +static void +ath12k_dp_link_desc_bank_free(struct ath12k_base *ab, + struct dp_link_desc_bank *link_desc_banks) +{ + int i; + + for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) { + if (link_desc_banks[i].vaddr_unaligned) { + dma_free_coherent(ab->dev, + link_desc_banks[i].size, + link_desc_banks[i].vaddr_unaligned, + link_desc_banks[i].paddr_unaligned); + link_desc_banks[i].vaddr_unaligned = NULL; + } + } +} + +static int ath12k_dp_link_desc_bank_alloc(struct ath12k_base *ab, + struct dp_link_desc_bank *desc_bank, + int n_link_desc_bank, + int last_bank_sz) +{ + struct ath12k_dp *dp = &ab->dp; + int i; + int ret = 0; + int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH; + + for (i = 0; i < n_link_desc_bank; i++) { + if (i == 
(n_link_desc_bank - 1) && last_bank_sz) + desc_sz = last_bank_sz; + + desc_bank[i].vaddr_unaligned = + dma_alloc_coherent(ab->dev, desc_sz, + &desc_bank[i].paddr_unaligned, + GFP_KERNEL); + if (!desc_bank[i].vaddr_unaligned) { + ret = -ENOMEM; + goto err; + } + + desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned, + HAL_LINK_DESC_ALIGN); + desc_bank[i].paddr = desc_bank[i].paddr_unaligned + + ((unsigned long)desc_bank[i].vaddr - + (unsigned long)desc_bank[i].vaddr_unaligned); + desc_bank[i].size = desc_sz; + } + + return 0; + +err: + ath12k_dp_link_desc_bank_free(ab, dp->link_desc_banks); + + return ret; +} + +void ath12k_dp_link_desc_cleanup(struct ath12k_base *ab, + struct dp_link_desc_bank *desc_bank, + u32 ring_type, struct dp_srng *ring) +{ + ath12k_dp_link_desc_bank_free(ab, desc_bank); + + if (ring_type != HAL_RXDMA_MONITOR_DESC) { + ath12k_dp_srng_cleanup(ab, ring); + ath12k_dp_scatter_idle_link_desc_cleanup(ab); + } +} + +static int ath12k_wbm_idle_ring_setup(struct ath12k_base *ab, u32 *n_link_desc) +{ + struct ath12k_dp *dp = &ab->dp; + u32 n_mpdu_link_desc, n_mpdu_queue_desc; + u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc; + int ret = 0; + + n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) / + HAL_NUM_MPDUS_PER_LINK_DESC; + + n_mpdu_queue_desc = n_mpdu_link_desc / + HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC; + + n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID * + DP_AVG_MSDUS_PER_FLOW) / + HAL_NUM_TX_MSDUS_PER_LINK_DESC; + + n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX * + DP_AVG_MSDUS_PER_MPDU) / + HAL_NUM_RX_MSDUS_PER_LINK_DESC; + + *n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc + + n_tx_msdu_link_desc + n_rx_msdu_link_desc; + + if (*n_link_desc & (*n_link_desc - 1)) + *n_link_desc = 1 << fls(*n_link_desc); + + ret = ath12k_dp_srng_setup(ab, &dp->wbm_idle_ring, + HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc); + if (ret) { + ath12k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret); + return ret; + } + return ret; +} + +int ath12k_dp_link_desc_setup(struct ath12k_base *ab, + struct dp_link_desc_bank *link_desc_banks, + u32 ring_type, struct hal_srng *srng, + u32 n_link_desc) +{ + u32 tot_mem_sz; + u32 n_link_desc_bank, last_bank_sz; + u32 entry_sz, align_bytes, n_entries; + struct hal_wbm_link_desc *desc; + u32 paddr; + int i, ret; + u32 cookie; + + tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE; + tot_mem_sz += HAL_LINK_DESC_ALIGN; + + if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) { + n_link_desc_bank = 1; + last_bank_sz = tot_mem_sz; + } else { + n_link_desc_bank = tot_mem_sz / + (DP_LINK_DESC_ALLOC_SIZE_THRESH - + HAL_LINK_DESC_ALIGN); + last_bank_sz = tot_mem_sz % + (DP_LINK_DESC_ALLOC_SIZE_THRESH - + HAL_LINK_DESC_ALIGN); + + if (last_bank_sz) + n_link_desc_bank += 1; + } + + if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX) + return -EINVAL; + + ret = ath12k_dp_link_desc_bank_alloc(ab, link_desc_banks, + n_link_desc_bank, last_bank_sz); + if (ret) + return ret; + + /* Setup link desc idle list for HW internal usage */ + entry_sz = ath12k_hal_srng_get_entrysize(ab, ring_type); + tot_mem_sz = entry_sz * n_link_desc; + + /* Setup scatter desc list when the total memory requirement is more */ + if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH && + ring_type != HAL_RXDMA_MONITOR_DESC) { + ret = ath12k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz, + n_link_desc_bank, + n_link_desc, + last_bank_sz); + if (ret) { + ath12k_warn(ab, "failed to setup scatter idle list descriptor: %d\n", + ret); + goto fail_desc_bank_free; + } + + 
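+ /* Scatter-list mode: the idle link descriptors were handed to HW + * through the scatter buffers above, so the WBM idle ring itself + * does not need to be populated here. + */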
return 0; + } + + spin_lock_bh(&srng->lock); + + ath12k_hal_srng_access_begin(ab, srng); + + for (i = 0; i < n_link_desc_bank; i++) { + align_bytes = link_desc_banks[i].vaddr - + link_desc_banks[i].vaddr_unaligned; + n_entries = (link_desc_banks[i].size - align_bytes) / + HAL_LINK_DESC_SIZE; + paddr = link_desc_banks[i].paddr; + while (n_entries && + (desc = ath12k_hal_srng_src_get_next_entry(ab, srng))) { + cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i); + ath12k_hal_set_link_desc_addr(desc, + cookie, paddr); + n_entries--; + paddr += HAL_LINK_DESC_SIZE; + } + } + + ath12k_hal_srng_access_end(ab, srng); + + spin_unlock_bh(&srng->lock); + + return 0; + +fail_desc_bank_free: + ath12k_dp_link_desc_bank_free(ab, link_desc_banks); + + return ret; +} + +int ath12k_dp_service_srng(struct ath12k_base *ab, + struct ath12k_ext_irq_grp *irq_grp, + int budget) +{ + struct napi_struct *napi = &irq_grp->napi; + int grp_id = irq_grp->grp_id; + int work_done = 0; + int i = 0, j; + int tot_work_done = 0; + enum dp_monitor_mode monitor_mode; + u8 ring_mask; + + while (i < ab->hw_params->max_tx_ring) { + if (ab->hw_params->ring_mask->tx[grp_id] & + BIT(ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[i].wbm_ring_num)) + ath12k_dp_tx_completion_handler(ab, i); + i++; + } + + if (ab->hw_params->ring_mask->rx_err[grp_id]) { + work_done = ath12k_dp_rx_process_err(ab, napi, budget); + budget -= work_done; + tot_work_done += work_done; + if (budget <= 0) + goto done; + } + + if (ab->hw_params->ring_mask->rx_wbm_rel[grp_id]) { + work_done = ath12k_dp_rx_process_wbm_err(ab, + napi, + budget); + budget -= work_done; + tot_work_done += work_done; + + if (budget <= 0) + goto done; + } + + if (ab->hw_params->ring_mask->rx[grp_id]) { + i = fls(ab->hw_params->ring_mask->rx[grp_id]) - 1; + work_done = ath12k_dp_rx_process(ab, i, napi, + budget); + budget -= work_done; + tot_work_done += work_done; + if (budget <= 0) + goto done; + } + + if (ab->hw_params->ring_mask->rx_mon_dest[grp_id]) { + monitor_mode = ATH12K_DP_RX_MONITOR_MODE; + ring_mask = ab->hw_params->ring_mask->rx_mon_dest[grp_id]; + for (i = 0; i < ab->num_radios; i++) { + for (j = 0; j < ab->hw_params->num_rxmda_per_pdev; j++) { + int id = i * ab->hw_params->num_rxmda_per_pdev + j; + + if (ring_mask & BIT(id)) { + work_done = + ath12k_dp_mon_process_ring(ab, id, napi, budget, + monitor_mode); + budget -= work_done; + tot_work_done += work_done; + + if (budget <= 0) + goto done; + } + } + } + } + + if (ab->hw_params->ring_mask->tx_mon_dest[grp_id]) { + monitor_mode = ATH12K_DP_TX_MONITOR_MODE; + ring_mask = ab->hw_params->ring_mask->tx_mon_dest[grp_id]; + for (i = 0; i < ab->num_radios; i++) { + for (j = 0; j < ab->hw_params->num_rxmda_per_pdev; j++) { + int id = i * ab->hw_params->num_rxmda_per_pdev + j; + + if (ring_mask & BIT(id)) { + work_done = + ath12k_dp_mon_process_ring(ab, id, napi, budget, + monitor_mode); + budget -= work_done; + tot_work_done += work_done; + + if (budget <= 0) + goto done; + } + } + } + } + + if (ab->hw_params->ring_mask->reo_status[grp_id]) + ath12k_dp_rx_process_reo_status(ab); + + if (ab->hw_params->ring_mask->host2rxdma[grp_id]) { + struct ath12k_dp *dp = &ab->dp; + struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; + + ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, 0, + ab->hw_params->hal_params->rx_buf_rbm, + true); + } + + /* TODO: Implement handler for other interrupts */ + +done: + return tot_work_done; +} + +void ath12k_dp_pdev_free(struct ath12k_base *ab) +{ + int i; + + del_timer_sync(&ab->mon_reap_timer); + + for (i = 0; i 
< ab->num_radios; i++) + ath12k_dp_rx_pdev_free(ab, i); +} + +void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab) +{ + struct ath12k *ar; + struct ath12k_pdev_dp *dp; + int i; + + for (i = 0; i < ab->num_radios; i++) { + ar = ab->pdevs[i].ar; + dp = &ar->dp; + dp->mac_id = i; + atomic_set(&dp->num_tx_pending, 0); + init_waitqueue_head(&dp->tx_empty_waitq); + + /* TODO: Add any RXDMA setup required per pdev */ + } +} + +static void ath12k_dp_service_mon_ring(struct timer_list *t) +{ + struct ath12k_base *ab = from_timer(ab, t, mon_reap_timer); + int i; + + for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) + ath12k_dp_mon_process_ring(ab, i, NULL, DP_MON_SERVICE_BUDGET, + ATH12K_DP_RX_MONITOR_MODE); + + mod_timer(&ab->mon_reap_timer, jiffies + + msecs_to_jiffies(ATH12K_MON_TIMER_INTERVAL)); +} + +static void ath12k_dp_mon_reap_timer_init(struct ath12k_base *ab) +{ + if (ab->hw_params->rxdma1_enable) + return; + + timer_setup(&ab->mon_reap_timer, ath12k_dp_service_mon_ring, 0); +} + +int ath12k_dp_pdev_alloc(struct ath12k_base *ab) +{ + struct ath12k *ar; + int ret; + int i; + + ret = ath12k_dp_rx_htt_setup(ab); + if (ret) + goto out; + + ath12k_dp_mon_reap_timer_init(ab); + + /* TODO: Per-pdev rx ring unlike tx ring which is mapped to different AC's */ + for (i = 0; i < ab->num_radios; i++) { + ar = ab->pdevs[i].ar; + ret = ath12k_dp_rx_pdev_alloc(ab, i); + if (ret) { + ath12k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n", + i); + goto err; + } + ret = ath12k_dp_rx_pdev_mon_attach(ar); + if (ret) { + ath12k_warn(ab, "failed to initialize mon pdev %d\n", i); + goto err; + } + } + + return 0; +err: + ath12k_dp_pdev_free(ab); +out: + return ret; +} + +int ath12k_dp_htt_connect(struct ath12k_dp *dp) +{ + struct ath12k_htc_svc_conn_req conn_req = {0}; + struct ath12k_htc_svc_conn_resp conn_resp = {0}; + int status; + + conn_req.ep_ops.ep_tx_complete = ath12k_dp_htt_htc_tx_complete; + conn_req.ep_ops.ep_rx_complete = ath12k_dp_htt_htc_t2h_msg_handler; + + /* connect to control service */ + conn_req.service_id = ATH12K_HTC_SVC_ID_HTT_DATA_MSG; + + status = ath12k_htc_connect_service(&dp->ab->htc, &conn_req, + &conn_resp); + + if (status) + return status; + + dp->eid = conn_resp.eid; + + return 0; +} + +static void ath12k_dp_update_vdev_search(struct ath12k_vif *arvif) +{ + switch (arvif->vdev_type) { + case WMI_VDEV_TYPE_STA: + /* TODO: Verify the search type and flags since ast hash + * is not part of peer mapv3 + */ + arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN; + arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT; + break; + case WMI_VDEV_TYPE_AP: + case WMI_VDEV_TYPE_IBSS: + arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN; + arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT; + break; + case WMI_VDEV_TYPE_MONITOR: + default: + return; + } +} + +void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_vif *arvif) +{ + struct ath12k_base *ab = ar->ab; + + arvif->tcl_metadata |= u32_encode_bits(1, HTT_TCL_META_DATA_TYPE) | + u32_encode_bits(arvif->vdev_id, + HTT_TCL_META_DATA_VDEV_ID) | + u32_encode_bits(ar->pdev->pdev_id, + HTT_TCL_META_DATA_PDEV_ID); + + /* set HTT extension valid bit to 0 by default */ + arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT; + + ath12k_dp_update_vdev_search(arvif); + arvif->vdev_id_check_en = true; + arvif->bank_id = ath12k_dp_tx_get_bank_profile(ab, arvif, &ab->dp); + + /* TODO: error path for bank id failure */ + if (arvif->bank_id == DP_INVALID_BANK_ID) { + ath12k_err(ar->ab, "Failed to initialize DP TX Banks"); + return; + } +} + +static 
void ath12k_dp_cc_cleanup(struct ath12k_base *ab) +{ + struct ath12k_rx_desc_info *desc_info, *tmp; + struct ath12k_tx_desc_info *tx_desc_info, *tmp1; + struct ath12k_dp *dp = &ab->dp; + struct sk_buff *skb; + int i; + + if (!dp->spt_info) + return; + + /* RX Descriptor cleanup */ + spin_lock_bh(&dp->rx_desc_lock); + + list_for_each_entry_safe(desc_info, tmp, &dp->rx_desc_used_list, list) { + list_del(&desc_info->list); + skb = desc_info->skb; + + if (!skb) + continue; + + dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr, + skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + } + + spin_unlock_bh(&dp->rx_desc_lock); + + /* TX Descriptor cleanup */ + for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) { + spin_lock_bh(&dp->tx_desc_lock[i]); + + list_for_each_entry_safe(tx_desc_info, tmp1, &dp->tx_desc_used_list[i], + list) { + list_del(&tx_desc_info->list); + skb = tx_desc_info->skb; + + if (!skb) + continue; + + dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr, + skb->len, DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + } + + spin_unlock_bh(&dp->tx_desc_lock[i]); + } + + /* unmap SPT pages */ + for (i = 0; i < dp->num_spt_pages; i++) { + if (!dp->spt_info[i].vaddr) + continue; + + dma_free_coherent(ab->dev, ATH12K_PAGE_SIZE, + dp->spt_info[i].vaddr, dp->spt_info[i].paddr); + dp->spt_info[i].vaddr = NULL; + } + + kfree(dp->spt_info); +} + +static void ath12k_dp_reoq_lut_cleanup(struct ath12k_base *ab) +{ + struct ath12k_dp *dp = &ab->dp; + + if (!ab->hw_params->reoq_lut_support) + return; + + if (!dp->reoq_lut.vaddr) + return; + + dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE, + dp->reoq_lut.vaddr, dp->reoq_lut.paddr); + dp->reoq_lut.vaddr = NULL; + + ath12k_hif_write32(ab, + HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab), 0); +} + +void ath12k_dp_free(struct ath12k_base *ab) +{ + struct ath12k_dp *dp = &ab->dp; + int i; + + ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks, + HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring); + + ath12k_dp_cc_cleanup(ab); + ath12k_dp_reoq_lut_cleanup(ab); + ath12k_dp_deinit_bank_profiles(ab); + ath12k_dp_srng_common_cleanup(ab); + + ath12k_dp_rx_reo_cmd_list_cleanup(ab); + + for (i = 0; i < ab->hw_params->max_tx_ring; i++) + kfree(dp->tx_ring[i].tx_status); + + ath12k_dp_rx_free(ab); + /* Deinit any SOC level resource */ +} + +void ath12k_dp_cc_config(struct ath12k_base *ab) +{ + u32 cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start; + u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG; + u32 wbm_base = HAL_SEQ_WCSS_UMAC_WBM_REG; + u32 val = 0; + + ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG0(ab), cmem_base); + + val |= u32_encode_bits(ATH12K_CMEM_ADDR_MSB, + HAL_REO1_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB) | + u32_encode_bits(ATH12K_CC_PPT_MSB, + HAL_REO1_SW_COOKIE_CFG_COOKIE_PPT_MSB) | + u32_encode_bits(ATH12K_CC_SPT_MSB, + HAL_REO1_SW_COOKIE_CFG_COOKIE_SPT_MSB) | + u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_ALIGN) | + u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_ENABLE) | + u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_GLOBAL_ENABLE); + + ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG1(ab), val); + + /* Enable HW CC for WBM */ + ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG0, cmem_base); + + val = u32_encode_bits(ATH12K_CMEM_ADDR_MSB, + HAL_WBM_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB) | + u32_encode_bits(ATH12K_CC_PPT_MSB, + HAL_WBM_SW_COOKIE_CFG_COOKIE_PPT_MSB) | + u32_encode_bits(ATH12K_CC_SPT_MSB, + HAL_WBM_SW_COOKIE_CFG_COOKIE_SPT_MSB) | + u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_ALIGN); + + ath12k_hif_write32(ab, 
wbm_base + HAL_WBM_SW_COOKIE_CFG1, val); + + /* Enable conversion complete indication */ + val = ath12k_hif_read32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG2); + val |= u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_RELEASE_PATH_EN) | + u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_ERR_PATH_EN) | + u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_CONV_IND_EN); + + ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG2, val); + + /* Enable Cookie conversion for WBM2SW Rings */ + val = ath12k_hif_read32(ab, wbm_base + HAL_WBM_SW_COOKIE_CONVERT_CFG); + val |= u32_encode_bits(1, HAL_WBM_SW_COOKIE_CONV_CFG_GLOBAL_EN) | + ab->hw_params->hal_params->wbm2sw_cc_enable; + + ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CONVERT_CFG, val); +} + +static u32 ath12k_dp_cc_cookie_gen(u16 ppt_idx, u16 spt_idx) +{ + return (u32)ppt_idx << ATH12K_CC_PPT_SHIFT | spt_idx; +} + +static inline void *ath12k_dp_cc_get_desc_addr_ptr(struct ath12k_base *ab, + u16 ppt_idx, u16 spt_idx) +{ + struct ath12k_dp *dp = &ab->dp; + + return dp->spt_info[ppt_idx].vaddr + spt_idx; +} + +struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab, + u32 cookie) +{ + struct ath12k_rx_desc_info **desc_addr_ptr; + u16 ppt_idx, spt_idx; + + ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT); + spt_idx = u32_get_bits(cookie, ATH12k_DP_CC_COOKIE_SPT); + + if (ppt_idx > ATH12K_NUM_RX_SPT_PAGES || + spt_idx > ATH12K_MAX_SPT_ENTRIES) + return NULL; + + desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx); + + return *desc_addr_ptr; +} + +struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab, + u32 cookie) +{ + struct ath12k_tx_desc_info **desc_addr_ptr; + u16 ppt_idx, spt_idx; + + ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT); + spt_idx = u32_get_bits(cookie, ATH12k_DP_CC_COOKIE_SPT); + + if (ppt_idx < ATH12K_NUM_RX_SPT_PAGES || + ppt_idx > ab->dp.num_spt_pages || + spt_idx > ATH12K_MAX_SPT_ENTRIES) + return NULL; + + desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx); + + return *desc_addr_ptr; +} + +static int ath12k_dp_cc_desc_init(struct ath12k_base *ab) +{ + struct ath12k_dp *dp = &ab->dp; + struct ath12k_rx_desc_info *rx_descs, **rx_desc_addr; + struct ath12k_tx_desc_info *tx_descs, **tx_desc_addr; + u32 i, j, pool_id, tx_spt_page; + u32 ppt_idx; + + spin_lock_bh(&dp->rx_desc_lock); + + /* First ATH12K_NUM_RX_SPT_PAGES of allocated SPT pages are used for RX */ + for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) { + rx_descs = kcalloc(ATH12K_MAX_SPT_ENTRIES, sizeof(*rx_descs), + GFP_ATOMIC); + + if (!rx_descs) { + spin_unlock_bh(&dp->rx_desc_lock); + return -ENOMEM; + } + + for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) { + rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(i, j); + rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC; + list_add_tail(&rx_descs[j].list, &dp->rx_desc_free_list); + + /* Update descriptor VA in SPT */ + rx_desc_addr = ath12k_dp_cc_get_desc_addr_ptr(ab, i, j); + *rx_desc_addr = &rx_descs[j]; + } + } + + spin_unlock_bh(&dp->rx_desc_lock); + + for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) { + spin_lock_bh(&dp->tx_desc_lock[pool_id]); + for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) { + tx_descs = kcalloc(ATH12K_MAX_SPT_ENTRIES, sizeof(*tx_descs), + GFP_ATOMIC); + + if (!tx_descs) { + spin_unlock_bh(&dp->tx_desc_lock[pool_id]); + /* Caller takes care of TX pending and RX desc cleanup */ + return -ENOMEM; + } + + for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) { + tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL; + ppt_idx = 
ATH12K_NUM_RX_SPT_PAGES + tx_spt_page; + tx_descs[j].desc_id = ath12k_dp_cc_cookie_gen(ppt_idx, j); + tx_descs[j].pool_id = pool_id; + list_add_tail(&tx_descs[j].list, + &dp->tx_desc_free_list[pool_id]); + + /* Update descriptor VA in SPT */ + tx_desc_addr = + ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, j); + *tx_desc_addr = &tx_descs[j]; + } + } + spin_unlock_bh(&dp->tx_desc_lock[pool_id]); + } + return 0; +} + +static int ath12k_dp_cc_init(struct ath12k_base *ab) +{ + struct ath12k_dp *dp = &ab->dp; + int i, ret = 0; + u32 cmem_base; + + INIT_LIST_HEAD(&dp->rx_desc_free_list); + INIT_LIST_HEAD(&dp->rx_desc_used_list); + spin_lock_init(&dp->rx_desc_lock); + + for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) { + INIT_LIST_HEAD(&dp->tx_desc_free_list[i]); + INIT_LIST_HEAD(&dp->tx_desc_used_list[i]); + spin_lock_init(&dp->tx_desc_lock[i]); + } + + dp->num_spt_pages = ATH12K_NUM_SPT_PAGES; + if (dp->num_spt_pages > ATH12K_MAX_PPT_ENTRIES) + dp->num_spt_pages = ATH12K_MAX_PPT_ENTRIES; + + dp->spt_info = kcalloc(dp->num_spt_pages, sizeof(struct ath12k_spt_info), + GFP_KERNEL); + + if (!dp->spt_info) { + ath12k_warn(ab, "SPT page allocation failure"); + return -ENOMEM; + } + + cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start; + + for (i = 0; i < dp->num_spt_pages; i++) { + dp->spt_info[i].vaddr = dma_alloc_coherent(ab->dev, + ATH12K_PAGE_SIZE, + &dp->spt_info[i].paddr, + GFP_KERNEL); + + if (!dp->spt_info[i].vaddr) { + ret = -ENOMEM; + goto free; + } + + if (dp->spt_info[i].paddr & ATH12K_SPT_4K_ALIGN_CHECK) { + ath12k_warn(ab, "SPT allocated memory is not 4K aligned"); + ret = -EINVAL; + goto free; + } + + /* Write to PPT in CMEM */ + ath12k_hif_write32(ab, cmem_base + ATH12K_PPT_ADDR_OFFSET(i), + dp->spt_info[i].paddr >> ATH12K_SPT_4K_ALIGN_OFFSET); + } + + ret = ath12k_dp_cc_desc_init(ab); + if (ret) { + ath12k_warn(ab, "HW CC desc init failed %d", ret); + goto free; + } + + return 0; +free: + ath12k_dp_cc_cleanup(ab); + return ret; +} + +static int ath12k_dp_reoq_lut_setup(struct ath12k_base *ab) +{ + struct ath12k_dp *dp = &ab->dp; + + if (!ab->hw_params->reoq_lut_support) + return 0; + + dp->reoq_lut.vaddr = dma_alloc_coherent(ab->dev, + DP_REOQ_LUT_SIZE, + &dp->reoq_lut.paddr, + GFP_KERNEL); + + if (!dp->reoq_lut.vaddr) { + ath12k_warn(ab, "failed to allocate memory for reoq table"); + return -ENOMEM; + } + + memset(dp->reoq_lut.vaddr, 0, DP_REOQ_LUT_SIZE); + + ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab), + dp->reoq_lut.paddr); + return 0; +} + +int ath12k_dp_alloc(struct ath12k_base *ab) +{ + struct ath12k_dp *dp = &ab->dp; + struct hal_srng *srng = NULL; + size_t size = 0; + u32 n_link_desc = 0; + int ret; + int i; + + dp->ab = ab; + + INIT_LIST_HEAD(&dp->reo_cmd_list); + INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list); + spin_lock_init(&dp->reo_cmd_lock); + + dp->reo_cmd_cache_flush_count = 0; + + ret = ath12k_wbm_idle_ring_setup(ab, &n_link_desc); + if (ret) { + ath12k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret); + return ret; + } + + srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id]; + + ret = ath12k_dp_link_desc_setup(ab, dp->link_desc_banks, + HAL_WBM_IDLE_LINK, srng, n_link_desc); + if (ret) { + ath12k_warn(ab, "failed to setup link desc: %d\n", ret); + return ret; + } + + ret = ath12k_dp_cc_init(ab); + + if (ret) { + ath12k_warn(ab, "failed to setup cookie converter %d\n", ret); + goto fail_link_desc_cleanup; + } + ret = ath12k_dp_init_bank_profiles(ab); + if (ret) { + ath12k_warn(ab, "failed to setup bank profiles %d\n", 
ret); + goto fail_hw_cc_cleanup; + } + + ret = ath12k_dp_srng_common_setup(ab); + if (ret) + goto fail_dp_bank_profiles_cleanup; + + size = sizeof(struct hal_wbm_release_ring_tx) * DP_TX_COMP_RING_SIZE; + + ret = ath12k_dp_reoq_lut_setup(ab); + if (ret) { + ath12k_warn(ab, "failed to setup reoq table %d\n", ret); + goto fail_cmn_srng_cleanup; + } + + for (i = 0; i < ab->hw_params->max_tx_ring; i++) { + dp->tx_ring[i].tcl_data_ring_id = i; + + dp->tx_ring[i].tx_status_head = 0; + dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1; + dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL); + if (!dp->tx_ring[i].tx_status) { + ret = -ENOMEM; + /* FIXME: The allocated tx status is not freed + * properly here + */ + goto fail_cmn_reoq_cleanup; + } + } + + for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++) + ath12k_hal_tx_set_dscp_tid_map(ab, i); + + ret = ath12k_dp_rx_alloc(ab); + if (ret) + goto fail_dp_rx_free; + + /* Init any SOC level resource for DP */ + + return 0; + +fail_dp_rx_free: + ath12k_dp_rx_free(ab); + +fail_cmn_reoq_cleanup: + ath12k_dp_reoq_lut_cleanup(ab); + +fail_cmn_srng_cleanup: + ath12k_dp_srng_common_cleanup(ab); + +fail_dp_bank_profiles_cleanup: + ath12k_dp_deinit_bank_profiles(ab); + +fail_hw_cc_cleanup: + ath12k_dp_cc_cleanup(ab); + +fail_link_desc_cleanup: + ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks, + HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring); + + return ret; +} diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h new file mode 100644 index 000000000000..36a876d7f61d --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/dp.h @@ -0,0 +1,1816 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef ATH12K_DP_H +#define ATH12K_DP_H + +#include "hal_rx.h" +#include "hw.h" + +#define MAX_RXDMA_PER_PDEV 2 + +struct ath12k_base; +struct ath12k_peer; +struct ath12k_dp; +struct ath12k_vif; +struct hal_tcl_status_ring; +struct ath12k_ext_irq_grp; + +#define DP_MON_PURGE_TIMEOUT_MS 100 +#define DP_MON_SERVICE_BUDGET 128 + +struct dp_srng { + u32 *vaddr_unaligned; + u32 *vaddr; + dma_addr_t paddr_unaligned; + dma_addr_t paddr; + int size; + u32 ring_id; +}; + +struct dp_rxdma_ring { + struct dp_srng refill_buf_ring; + struct idr bufs_idr; + /* Protects bufs_idr */ + spinlock_t idr_lock; + int bufs_max; +}; + +#define ATH12K_TX_COMPL_NEXT(x) (((x) + 1) % DP_TX_COMP_RING_SIZE) + +struct dp_tx_ring { + u8 tcl_data_ring_id; + struct dp_srng tcl_data_ring; + struct dp_srng tcl_comp_ring; + struct hal_wbm_completion_ring_tx *tx_status; + int tx_status_head; + int tx_status_tail; +}; + +struct ath12k_pdev_mon_stats { + u32 status_ppdu_state; + u32 status_ppdu_start; + u32 status_ppdu_end; + u32 status_ppdu_compl; + u32 status_ppdu_start_mis; + u32 status_ppdu_end_mis; + u32 status_ppdu_done; + u32 dest_ppdu_done; + u32 dest_mpdu_done; + u32 dest_mpdu_drop; + u32 dup_mon_linkdesc_cnt; + u32 dup_mon_buf_cnt; +}; + +struct dp_link_desc_bank { + void *vaddr_unaligned; + void *vaddr; + dma_addr_t paddr_unaligned; + dma_addr_t paddr; + u32 size; +}; + +/* Size to enforce scatter idle list mode */ +#define DP_LINK_DESC_ALLOC_SIZE_THRESH 0x200000 +#define DP_LINK_DESC_BANKS_MAX 8 + +#define DP_LINK_DESC_START 0x4000 +#define DP_LINK_DESC_SHIFT 3 + +#define DP_LINK_DESC_COOKIE_SET(id, page) \ + ((((id) + DP_LINK_DESC_START) << DP_LINK_DESC_SHIFT) | (page)) + +#define DP_LINK_DESC_BANK_MASK GENMASK(2, 0) + +#define DP_RX_DESC_COOKIE_INDEX_MAX 0x3ffff +#define DP_RX_DESC_COOKIE_POOL_ID_MAX 0x1c0000 +#define DP_RX_DESC_COOKIE_MAX \ + (DP_RX_DESC_COOKIE_INDEX_MAX | DP_RX_DESC_COOKIE_POOL_ID_MAX) +#define DP_NOT_PPDU_ID_WRAP_AROUND 20000 + +enum ath12k_dp_ppdu_state { + DP_PPDU_STATUS_START, + DP_PPDU_STATUS_DONE, +}; + +struct dp_mon_mpdu { + struct list_head list; + struct sk_buff *head; + struct sk_buff *tail; +}; + +#define DP_MON_MAX_STATUS_BUF 32 + +struct ath12k_mon_data { + struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX]; + struct hal_rx_mon_ppdu_info mon_ppdu_info; + + u32 mon_ppdu_status; + u32 mon_last_buf_cookie; + u64 mon_last_linkdesc_paddr; + u16 chan_noise_floor; + + struct ath12k_pdev_mon_stats rx_mon_stats; + /* lock for monitor data */ + spinlock_t mon_lock; + struct sk_buff_head rx_status_q; + struct dp_mon_mpdu *mon_mpdu; + struct list_head dp_rx_mon_mpdu_list; + struct sk_buff *dest_skb_q[DP_MON_MAX_STATUS_BUF]; + struct dp_mon_tx_ppdu_info *tx_prot_ppdu_info; + struct dp_mon_tx_ppdu_info *tx_data_ppdu_info; +}; + +struct ath12k_pdev_dp { + u32 mac_id; + atomic_t num_tx_pending; + wait_queue_head_t tx_empty_waitq; + struct dp_srng rxdma_mon_dst_ring[MAX_RXDMA_PER_PDEV]; + struct dp_srng tx_mon_dst_ring[MAX_RXDMA_PER_PDEV]; + + struct ieee80211_rx_status rx_status; + struct ath12k_mon_data mon_data; +}; + +#define DP_NUM_CLIENTS_MAX 64 +#define DP_AVG_TIDS_PER_CLIENT 2 +#define DP_NUM_TIDS_MAX (DP_NUM_CLIENTS_MAX * DP_AVG_TIDS_PER_CLIENT) +#define DP_AVG_MSDUS_PER_FLOW 128 +#define DP_AVG_FLOWS_PER_TID 2 +#define DP_AVG_MPDUS_PER_TID_MAX 128 +#define DP_AVG_MSDUS_PER_MPDU 4 + +#define DP_RX_HASH_ENABLE 1 /* Enable hash based Rx steering */ + +#define DP_BA_WIN_SZ_MAX 256 + +#define DP_TCL_NUM_RING_MAX 4 + +#define DP_IDLE_SCATTER_BUFS_MAX 16 + +#define 
DP_WBM_RELEASE_RING_SIZE 64
+#define DP_TCL_DATA_RING_SIZE 512
+#define DP_TX_COMP_RING_SIZE 32768
+#define DP_TX_IDR_SIZE DP_TX_COMP_RING_SIZE
+#define DP_TCL_CMD_RING_SIZE 32
+#define DP_TCL_STATUS_RING_SIZE 32
+#define DP_REO_DST_RING_MAX 8
+#define DP_REO_DST_RING_SIZE 2048
+#define DP_REO_REINJECT_RING_SIZE 32
+#define DP_RX_RELEASE_RING_SIZE 1024
+#define DP_REO_EXCEPTION_RING_SIZE 128
+#define DP_REO_CMD_RING_SIZE 128
+#define DP_REO_STATUS_RING_SIZE 2048
+#define DP_RXDMA_BUF_RING_SIZE 4096
+#define DP_RXDMA_REFILL_RING_SIZE 2048
+#define DP_RXDMA_ERR_DST_RING_SIZE 1024
+#define DP_RXDMA_MON_STATUS_RING_SIZE 1024
+#define DP_RXDMA_MONITOR_BUF_RING_SIZE 4096
+#define DP_RXDMA_MONITOR_DST_RING_SIZE 2048
+#define DP_RXDMA_MONITOR_DESC_RING_SIZE 4096
+#define DP_TX_MONITOR_BUF_RING_SIZE 4096
+#define DP_TX_MONITOR_DEST_RING_SIZE 2048
+
+#define DP_TX_MONITOR_BUF_SIZE 2048
+#define DP_TX_MONITOR_BUF_SIZE_MIN 48
+#define DP_TX_MONITOR_BUF_SIZE_MAX 8192
+
+#define DP_RX_BUFFER_SIZE 2048
+#define DP_RX_BUFFER_SIZE_LITE 1024
+#define DP_RX_BUFFER_ALIGN_SIZE 128
+
+#define DP_RXDMA_BUF_COOKIE_BUF_ID GENMASK(17, 0)
+#define DP_RXDMA_BUF_COOKIE_PDEV_ID GENMASK(19, 18)
+
+#define DP_HW2SW_MACID(mac_id) ({ typeof(mac_id) x = (mac_id); x ? x - 1 : 0; })
+#define DP_SW2HW_MACID(mac_id) ((mac_id) + 1)
+
+#define DP_TX_DESC_ID_MAC_ID GENMASK(1, 0)
+#define DP_TX_DESC_ID_MSDU_ID GENMASK(18, 2)
+#define DP_TX_DESC_ID_POOL_ID GENMASK(20, 19)
+
+#define ATH12K_SHADOW_DP_TIMER_INTERVAL 20
+#define ATH12K_SHADOW_CTRL_TIMER_INTERVAL 10
+
+#define ATH12K_NUM_POOL_TX_DESC 32768
+
+/* TODO: revisit this count during testing */
+#define ATH12K_RX_DESC_COUNT (12288)
+
+#define ATH12K_PAGE_SIZE PAGE_SIZE
+
+/* Total 1024 entries in PPT, i.e. 4K/4, considering 4K-aligned
+ * SPT pages which make the lower 12 bits 0
+ */
+#define ATH12K_MAX_PPT_ENTRIES 1024
+
+/* Total 512 entries in an SPT, i.e. 4K page/8 */
+#define ATH12K_MAX_SPT_ENTRIES 512
+
+#define ATH12K_NUM_RX_SPT_PAGES ((ATH12K_RX_DESC_COUNT) / ATH12K_MAX_SPT_ENTRIES)
+
+#define ATH12K_TX_SPT_PAGES_PER_POOL (ATH12K_NUM_POOL_TX_DESC / \
+                      ATH12K_MAX_SPT_ENTRIES)
+#define ATH12K_NUM_TX_SPT_PAGES (ATH12K_TX_SPT_PAGES_PER_POOL * ATH12K_HW_MAX_QUEUES)
+#define ATH12K_NUM_SPT_PAGES (ATH12K_NUM_RX_SPT_PAGES + ATH12K_NUM_TX_SPT_PAGES)
+
+/* The SPT pages are divided between RX and TX: the first block is for RX
+ * and the remainder for TX
+ */
+#define ATH12K_NUM_TX_SPT_PAGE_START ATH12K_NUM_RX_SPT_PAGES
+
+#define ATH12K_DP_RX_DESC_MAGIC 0xBABABABA
+
+/* 4K-aligned addresses have the last 12 bits set to 0; this check is done
+ * so that two SPT page addresses can be stored per 8 bytes
+ * of CMEM (PPT)
+ */
+#define ATH12K_SPT_4K_ALIGN_CHECK 0xFFF
+#define ATH12K_SPT_4K_ALIGN_OFFSET 12
+#define ATH12K_PPT_ADDR_OFFSET(ppt_index) (4 * (ppt_index))
+
+/* To indicate the CMEM address to HW; b0-31 are the CMEM base received via QMI */
+#define ATH12K_CMEM_ADDR_MSB 0x10
+
+/* Of the 20-bit cookie, b0-b8 indicate the SPT offset and b9-b19 the PPT index */
+#define ATH12K_CC_SPT_MSB 8
+#define ATH12K_CC_PPT_MSB 19
+#define ATH12K_CC_PPT_SHIFT 9
+#define ATH12k_DP_CC_COOKIE_SPT GENMASK(8, 0)
+#define ATH12K_DP_CC_COOKIE_PPT GENMASK(19, 9)
+
+#define DP_REO_QREF_NUM GENMASK(31, 16)
+#define DP_MAX_PEER_ID 2047
+
+/* Total size of the LUT is based on 2K peers, each having a reference
+ * for 17 TIDs; note each entry is of type ath12k_reo_queue_ref,
+ * hence the total size is 2048 * 17 * 8 = 278528
+ */
+#define DP_REOQ_LUT_SIZE 278528
+
+/* Invalid TX Bank ID value */
+#define DP_INVALID_BANK_ID -1
+
+struct
ath12k_dp_tx_bank_profile { + u8 is_configured; + u32 num_users; + u32 bank_config; +}; + +struct ath12k_hp_update_timer { + struct timer_list timer; + bool started; + bool init; + u32 tx_num; + u32 timer_tx_num; + u32 ring_id; + u32 interval; + struct ath12k_base *ab; +}; + +struct ath12k_rx_desc_info { + struct list_head list; + struct sk_buff *skb; + u32 cookie; + u32 magic; +}; + +struct ath12k_tx_desc_info { + struct list_head list; + struct sk_buff *skb; + u32 desc_id; /* Cookie */ + u8 mac_id; + u8 pool_id; +}; + +struct ath12k_spt_info { + dma_addr_t paddr; + u64 *vaddr; +}; + +struct ath12k_reo_queue_ref { + u32 info0; + u32 info1; +} __packed; + +struct ath12k_reo_q_addr_lut { + dma_addr_t paddr; + u32 *vaddr; +}; + +struct ath12k_dp { + struct ath12k_base *ab; + u8 num_bank_profiles; + /* protects the access and update of bank_profiles */ + spinlock_t tx_bank_lock; + struct ath12k_dp_tx_bank_profile *bank_profiles; + enum ath12k_htc_ep_id eid; + struct completion htt_tgt_version_received; + u8 htt_tgt_ver_major; + u8 htt_tgt_ver_minor; + struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX]; + struct dp_srng wbm_idle_ring; + struct dp_srng wbm_desc_rel_ring; + struct dp_srng tcl_cmd_ring; + struct dp_srng tcl_status_ring; + struct dp_srng reo_reinject_ring; + struct dp_srng rx_rel_ring; + struct dp_srng reo_except_ring; + struct dp_srng reo_cmd_ring; + struct dp_srng reo_status_ring; + struct dp_srng reo_dst_ring[DP_REO_DST_RING_MAX]; + struct dp_tx_ring tx_ring[DP_TCL_NUM_RING_MAX]; + struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX]; + struct list_head reo_cmd_list; + struct list_head reo_cmd_cache_flush_list; + u32 reo_cmd_cache_flush_count; + + /* protects access to below fields, + * - reo_cmd_list + * - reo_cmd_cache_flush_list + * - reo_cmd_cache_flush_count + */ + spinlock_t reo_cmd_lock; + struct ath12k_hp_update_timer reo_cmd_timer; + struct ath12k_hp_update_timer tx_ring_timer[DP_TCL_NUM_RING_MAX]; + struct ath12k_spt_info *spt_info; + u32 num_spt_pages; + struct list_head rx_desc_free_list; + struct list_head rx_desc_used_list; + /* protects the free and used desc list */ + spinlock_t rx_desc_lock; + + struct list_head tx_desc_free_list[ATH12K_HW_MAX_QUEUES]; + struct list_head tx_desc_used_list[ATH12K_HW_MAX_QUEUES]; + /* protects the free and used desc lists */ + spinlock_t tx_desc_lock[ATH12K_HW_MAX_QUEUES]; + + struct dp_rxdma_ring rx_refill_buf_ring; + struct dp_srng rx_mac_buf_ring[MAX_RXDMA_PER_PDEV]; + struct dp_srng rxdma_err_dst_ring[MAX_RXDMA_PER_PDEV]; + struct dp_rxdma_ring rxdma_mon_buf_ring; + struct dp_rxdma_ring tx_mon_buf_ring; + struct ath12k_reo_q_addr_lut reoq_lut; +}; + +/* HTT definitions */ + +#define HTT_TCL_META_DATA_TYPE BIT(0) +#define HTT_TCL_META_DATA_VALID_HTT BIT(1) + +/* vdev meta data */ +#define HTT_TCL_META_DATA_VDEV_ID GENMASK(9, 2) +#define HTT_TCL_META_DATA_PDEV_ID GENMASK(11, 10) +#define HTT_TCL_META_DATA_HOST_INSPECTED BIT(12) + +/* peer meta data */ +#define HTT_TCL_META_DATA_PEER_ID GENMASK(15, 2) + +#define HTT_TX_WBM_COMP_STATUS_OFFSET 8 + +/* HTT tx completion is overlayed in wbm_release_ring */ +#define HTT_TX_WBM_COMP_INFO0_STATUS GENMASK(16, 13) +#define HTT_TX_WBM_COMP_INFO1_REINJECT_REASON GENMASK(3, 0) +#define HTT_TX_WBM_COMP_INFO1_EXCEPTION_FRAME BIT(4) + +#define HTT_TX_WBM_COMP_INFO2_ACK_RSSI GENMASK(31, 24) + +struct htt_tx_wbm_completion { + __le32 rsvd0[2]; + __le32 info0; + __le32 info1; + __le32 info2; + __le32 info3; + __le32 info4; + __le32 rsvd1; + +} __packed; + +enum 
htt_h2t_msg_type {
+    HTT_H2T_MSG_TYPE_VERSION_REQ = 0,
+    HTT_H2T_MSG_TYPE_SRING_SETUP = 0xb,
+    HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG = 0xc,
+    HTT_H2T_MSG_TYPE_EXT_STATS_CFG = 0x10,
+    HTT_H2T_MSG_TYPE_PPDU_STATS_CFG = 0x11,
+    HTT_H2T_MSG_TYPE_VDEV_TXRX_STATS_CFG = 0x1a,
+    HTT_H2T_MSG_TYPE_TX_MONITOR_CFG = 0x1b,
+};
+
+#define HTT_VER_REQ_INFO_MSG_ID GENMASK(7, 0)
+
+struct htt_ver_req_cmd {
+    __le32 ver_reg_info;
+} __packed;
+
+enum htt_srng_ring_type {
+    HTT_HW_TO_SW_RING,
+    HTT_SW_TO_HW_RING,
+    HTT_SW_TO_SW_RING,
+};
+
+enum htt_srng_ring_id {
+    HTT_RXDMA_HOST_BUF_RING,
+    HTT_RXDMA_MONITOR_STATUS_RING,
+    HTT_RXDMA_MONITOR_BUF_RING,
+    HTT_RXDMA_MONITOR_DESC_RING,
+    HTT_RXDMA_MONITOR_DEST_RING,
+    HTT_HOST1_TO_FW_RXBUF_RING,
+    HTT_HOST2_TO_FW_RXBUF_RING,
+    HTT_RXDMA_NON_MONITOR_DEST_RING,
+    HTT_TX_MON_HOST2MON_BUF_RING,
+    HTT_TX_MON_MON2HOST_DEST_RING,
+};
+
+/* host -> target HTT_SRING_SETUP message
+ *
+ * After target is booted up, Host can send SRING setup message for
+ * each host facing LMAC SRING. Target sets up HW registers based
+ * on the setup message and confirms back to Host if response_required is set.
+ * Host should wait for the confirmation message before sending a new SRING
+ * setup message.
+ *
+ * The message would appear as follows:
+ *
+ * |31 24|23 20|19|18 16|15|14 8|7 0|
+ * |---------------+-----------------+----------------+------------------|
+ * | ring_type | ring_id | pdev_id | msg_type |
+ * |----------------------------------------------------------------------|
+ * | ring_base_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_base_addr_hi |
+ * |----------------------------------------------------------------------|
+ * |ring_misc_cfg_flag|ring_entry_size| ring_size |
+ * |----------------------------------------------------------------------|
+ * | ring_head_offset32_remote_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_head_offset32_remote_addr_hi |
+ * |----------------------------------------------------------------------|
+ * | ring_tail_offset32_remote_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_tail_offset32_remote_addr_hi |
+ * |----------------------------------------------------------------------|
+ * | ring_msi_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_msi_addr_hi |
+ * |----------------------------------------------------------------------|
+ * | ring_msi_data |
+ * |----------------------------------------------------------------------|
+ * | intr_timer_th |IM| intr_batch_counter_th |
+ * |----------------------------------------------------------------------|
+ * | reserved |RR|PTCF| intr_low_threshold |
+ * |----------------------------------------------------------------------|
+ * Where
+ * IM = sw_intr_mode
+ * RR = response_required
+ * PTCF = prefetch_timer_cfg
+ *
+ * The message is interpreted as follows:
+ * dword0 - b'0:7 - msg_type: This will be set to
+ *                  HTT_H2T_MSG_TYPE_SRING_SETUP
+ *          b'8:15 - pdev_id:
+ *                   0 (for rings at SOC/UMAC level),
+ *                   1/2/3 mac id (for rings at LMAC level)
+ *          b'16:23 - ring_id: identifies which ring is to be set up;
+ *                    more details can be got from enum htt_srng_ring_id
+ *          b'24:31 - ring_type: identifies the type of host ring;
+ *                    more details can be got from enum htt_srng_ring_type
+ * dword1 - b'0:31 - ring_base_addr_lo: Lower 32bits of ring base address
+ * dword2 - b'0:31 - ring_base_addr_hi: Upper 32bits of
ring base address + * dword3 - b'0:15 - ring_size: size of the ring in unit of 4-bytes words + * b'16:23 - ring_entry_size: Size of each entry in 4-byte word units + * b'24:31 - ring_misc_cfg_flag: Valid only for HW_TO_SW_RING and + * SW_TO_HW_RING. + * Refer to HTT_SRING_SETUP_RING_MISC_CFG_RING defs. + * dword4 - b'0:31 - ring_head_off32_remote_addr_lo: + * Lower 32 bits of memory address of the remote variable + * storing the 4-byte word offset that identifies the head + * element within the ring. + * (The head offset variable has type u32.) + * Valid for HW_TO_SW and SW_TO_SW rings. + * dword5 - b'0:31 - ring_head_off32_remote_addr_hi: + * Upper 32 bits of memory address of the remote variable + * storing the 4-byte word offset that identifies the head + * element within the ring. + * (The head offset variable has type u32.) + * Valid for HW_TO_SW and SW_TO_SW rings. + * dword6 - b'0:31 - ring_tail_off32_remote_addr_lo: + * Lower 32 bits of memory address of the remote variable + * storing the 4-byte word offset that identifies the tail + * element within the ring. + * (The tail offset variable has type u32.) + * Valid for HW_TO_SW and SW_TO_SW rings. + * dword7 - b'0:31 - ring_tail_off32_remote_addr_hi: + * Upper 32 bits of memory address of the remote variable + * storing the 4-byte word offset that identifies the tail + * element within the ring. + * (The tail offset variable has type u32.) + * Valid for HW_TO_SW and SW_TO_SW rings. + * dword8 - b'0:31 - ring_msi_addr_lo: Lower 32bits of MSI cfg address + * valid only for HW_TO_SW_RING and SW_TO_HW_RING + * dword9 - b'0:31 - ring_msi_addr_hi: Upper 32bits of MSI cfg address + * valid only for HW_TO_SW_RING and SW_TO_HW_RING + * dword10 - b'0:31 - ring_msi_data: MSI data + * Refer to HTT_SRING_SETUP_RING_MSC_CFG_xxx defs + * valid only for HW_TO_SW_RING and SW_TO_HW_RING + * dword11 - b'0:14 - intr_batch_counter_th: + * batch counter threshold is in units of 4-byte words. + * HW internally maintains and increments batch count. + * (see SRING spec for detail description). + * When batch count reaches threshold value, an interrupt + * is generated by HW. + * b'15 - sw_intr_mode: + * This configuration shall be static. + * Only programmed at power up. + * 0: generate pulse style sw interrupts + * 1: generate level style sw interrupts + * b'16:31 - intr_timer_th: + * The timer init value when timer is idle or is + * initialized to start downcounting. + * In 8us units (to cover a range of 0 to 524 ms) + * dword12 - b'0:15 - intr_low_threshold: + * Used only by Consumer ring to generate ring_sw_int_p. + * Ring entries low threshold water mark, that is used + * in combination with the interrupt timer as well as + * the clearing of the level interrupt. + * b'16:18 - prefetch_timer_cfg: + * Used only by Consumer ring to set timer mode to + * support Application prefetch handling. 
+ *            The external tail offset/pointer will be updated
+ *            at following intervals:
+ *            3'b000: (Prefetch feature disabled; used only for debug)
+ *            3'b001: 1 usec
+ *            3'b010: 4 usec
+ *            3'b011: 8 usec (default)
+ *            3'b100: 16 usec
+ *            Others: Reserved
+ *          b'19 - response_required:
+ *                 Host needs HTT_T2H_MSG_TYPE_SRING_SETUP_DONE as response
+ *          b'20:31 - reserved: reserved for future use
+ */
+
+#define HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
+#define HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID GENMASK(15, 8)
+#define HTT_SRNG_SETUP_CMD_INFO0_RING_ID GENMASK(23, 16)
+#define HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE GENMASK(31, 24)
+
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE GENMASK(15, 0)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE GENMASK(23, 16)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS BIT(25)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP BIT(27)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP BIT(28)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP BIT(29)
+
+#define HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH GENMASK(14, 0)
+#define HTT_SRNG_SETUP_CMD_INTR_INFO_SW_INTR_MODE BIT(15)
+#define HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH GENMASK(31, 16)
+
+#define HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH GENMASK(15, 0)
+#define HTT_SRNG_SETUP_CMD_INFO2_PRE_FETCH_TIMER_CFG GENMASK(18, 16)
+#define HTT_SRNG_SETUP_CMD_INFO2_RESPONSE_REQUIRED BIT(19)
+
+struct htt_srng_setup_cmd {
+    __le32 info0;
+    __le32 ring_base_addr_lo;
+    __le32 ring_base_addr_hi;
+    __le32 info1;
+    __le32 ring_head_off32_remote_addr_lo;
+    __le32 ring_head_off32_remote_addr_hi;
+    __le32 ring_tail_off32_remote_addr_lo;
+    __le32 ring_tail_off32_remote_addr_hi;
+    __le32 ring_msi_addr_lo;
+    __le32 ring_msi_addr_hi;
+    __le32 msi_data;
+    __le32 intr_info;
+    __le32 info2;
+} __packed;
+
+/* host -> target FW PPDU_STATS config message
+ *
+ * @details
+ * The following field definitions describe the format of the HTT host
+ * to target FW for PPDU_STATS_CFG msg.
+ * The message allows the host to configure the PPDU_STATS_IND messages
+ * produced by the target.
+ *
+ * |31 24|23 16|15 8|7 0|
+ * |-----------------------------------------------------------|
+ * | REQ bit mask | pdev_mask | msg type |
+ * |-----------------------------------------------------------|
+ * Header fields:
+ *  - MSG_TYPE
+ *    Bits 7:0
+ *    Purpose: identifies this is a req to configure ppdu_stats_ind from target
+ *    Value: 0x11
+ *  - PDEV_MASK
+ *    Bits 15:8
+ *    Purpose: identifies which pdevs this PPDU stats configuration applies to
+ *    Value: This is an overloaded field, refer to usage and interpretation of
+ *           PDEV in interface document.
+ *           Bit   8    : Reserved for SOC stats
+ *           Bit 9 - 15 : Indicates PDEV_MASK in DBDC
+ *                        Indicates MACID_MASK in DBS
+ *  - REQ_TLV_BIT_MASK
+ *    Bits 31:16
+ *    Purpose: each set bit indicates the corresponding PPDU stats TLV type
+ *        needs to be included in the target's PPDU_STATS_IND messages.
+ *    Value: refer to htt_ppdu_stats_tlv_tag_t
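+ *
+ * Illustrative example (not from the interface spec): a host asking the
+ * first pdev in DBDC terms (bit 9) for the default TLV set could pack the
+ * word as
+ *
+ *    msg = u32_encode_bits(HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
+ *                          HTT_PPDU_STATS_CFG_MSG_TYPE) |
+ *          u32_encode_bits(BIT(1), HTT_PPDU_STATS_CFG_PDEV_ID) |
+ *          u32_encode_bits(HTT_PPDU_STATS_TAG_DEFAULT,
+ *                          HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK);
+ *
+ * using the HTT_PPDU_STATS_CFG_* masks defined below.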
+ * + */ + +struct htt_ppdu_stats_cfg_cmd { + __le32 msg; +} __packed; + +#define HTT_PPDU_STATS_CFG_MSG_TYPE GENMASK(7, 0) +#define HTT_PPDU_STATS_CFG_PDEV_ID GENMASK(15, 8) +#define HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK GENMASK(31, 16) + +enum htt_ppdu_stats_tag_type { + HTT_PPDU_STATS_TAG_COMMON, + HTT_PPDU_STATS_TAG_USR_COMMON, + HTT_PPDU_STATS_TAG_USR_RATE, + HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_64, + HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_256, + HTT_PPDU_STATS_TAG_SCH_CMD_STATUS, + HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON, + HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_64, + HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_256, + HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS, + HTT_PPDU_STATS_TAG_USR_COMPLTN_FLUSH, + HTT_PPDU_STATS_TAG_USR_COMMON_ARRAY, + HTT_PPDU_STATS_TAG_INFO, + HTT_PPDU_STATS_TAG_TX_MGMTCTRL_PAYLOAD, + + /* New TLV's are added above to this line */ + HTT_PPDU_STATS_TAG_MAX, +}; + +#define HTT_PPDU_STATS_TAG_DEFAULT (BIT(HTT_PPDU_STATS_TAG_COMMON) \ + | BIT(HTT_PPDU_STATS_TAG_USR_COMMON) \ + | BIT(HTT_PPDU_STATS_TAG_USR_RATE) \ + | BIT(HTT_PPDU_STATS_TAG_SCH_CMD_STATUS) \ + | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON) \ + | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS) \ + | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_FLUSH) \ + | BIT(HTT_PPDU_STATS_TAG_USR_COMMON_ARRAY)) + +#define HTT_PPDU_STATS_TAG_PKTLOG (BIT(HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_64) | \ + BIT(HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_256) | \ + BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_64) | \ + BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_256) | \ + BIT(HTT_PPDU_STATS_TAG_INFO) | \ + BIT(HTT_PPDU_STATS_TAG_TX_MGMTCTRL_PAYLOAD) | \ + HTT_PPDU_STATS_TAG_DEFAULT) + +enum htt_stats_internal_ppdu_frametype { + HTT_STATS_PPDU_FTYPE_CTRL, + HTT_STATS_PPDU_FTYPE_DATA, + HTT_STATS_PPDU_FTYPE_BAR, + HTT_STATS_PPDU_FTYPE_MAX +}; + +/* HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG Message + * + * details: + * HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG message is sent by host to + * configure RXDMA rings. + * The configuration is per ring based and includes both packet subtypes + * and PPDU/MPDU TLVs. + * + * The message would appear as follows: + * + * |31 26|25|24|23 16|15 8|7 0| + * |-----------------+----------------+----------------+---------------| + * | rsvd1 |PS|SS| ring_id | pdev_id | msg_type | + * |-------------------------------------------------------------------| + * | rsvd2 | ring_buffer_size | + * |-------------------------------------------------------------------| + * | packet_type_enable_flags_0 | + * |-------------------------------------------------------------------| + * | packet_type_enable_flags_1 | + * |-------------------------------------------------------------------| + * | packet_type_enable_flags_2 | + * |-------------------------------------------------------------------| + * | packet_type_enable_flags_3 | + * |-------------------------------------------------------------------| + * | tlv_filter_in_flags | + * |-------------------------------------------------------------------| + * Where: + * PS = pkt_swap + * SS = status_swap + * The message is interpreted as follows: + * dword0 - b'0:7 - msg_type: This will be set to + * HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG + * b'8:15 - pdev_id: + * 0 (for rings at SOC/UMAC level), + * 1/2/3 mac id (for rings at LMAC level) + * b'16:23 - ring_id : Identify the ring to configure. 
+ *                    More details can be got from enum htt_srng_ring_id
+ *          b'24    - status_swap: 1 is to swap status TLV
+ *          b'25    - pkt_swap: 1 is to swap packet TLV
+ *          b'26:31 - rsvd1: reserved for future use
+ * dword1 - b'0:15 - ring_buffer_size: size of buffers referenced by rx ring,
+ *                   in byte units.
+ *                   Valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ *        - b'16:31 - rsvd2: Reserved for future use
+ * dword2 - b'0:31 - packet_type_enable_flags_0:
+ *                   Enable MGMT packet from 0b0000 to 0b1001
+ *                   bits from low to high: FP, MD, MO - 3 bits
+ *                       FP: Filter_Pass
+ *                       MD: Monitor_Direct
+ *                       MO: Monitor_Other
+ *                   10 mgmt subtypes * 3 bits -> 30 bits
+ *                   Refer to PKT_TYPE_ENABLE_FLAG0_xxx_MGMT_xxx defs
+ * dword3 - b'0:31 - packet_type_enable_flags_1:
+ *                   Enable MGMT packet from 0b1010 to 0b1111
+ *                   bits from low to high: FP, MD, MO - 3 bits
+ *                   Refer to PKT_TYPE_ENABLE_FLAG1_xxx_MGMT_xxx defs
+ * dword4 - b'0:31 - packet_type_enable_flags_2:
+ *                   Enable CTRL packet from 0b0000 to 0b1001
+ *                   bits from low to high: FP, MD, MO - 3 bits
+ *                   Refer to PKT_TYPE_ENABLE_FLAG2_xxx_CTRL_xxx defs
+ * dword5 - b'0:31 - packet_type_enable_flags_3:
+ *                   Enable CTRL packet from 0b1010 to 0b1111,
+ *                   MCAST_DATA, UCAST_DATA, NULL_DATA
+ *                   bits from low to high: FP, MD, MO - 3 bits
+ *                   Refer to PKT_TYPE_ENABLE_FLAG3_xxx_CTRL_xxx defs
+ * dword6 - b'0:31 - tlv_filter_in_flags:
+ *                   Filter in Attention/MPDU/PPDU/Header/User tlvs
+ *                   Refer to CFG_TLV_FILTER_IN_FLAG defs
+ */
+
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID GENMASK(15, 8)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID GENMASK(23, 16)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS BIT(24)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS BIT(25)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE GENMASK(15, 0)
+#define HTT_RX_RING_SELECTION_CFG_CMD_OFFSET_VALID BIT(26)
+
+#define HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET GENMASK(15, 0)
+#define HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET GENMASK(31, 16)
+#define HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET GENMASK(15, 0)
+#define HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET GENMASK(31, 16)
+#define HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET GENMASK(15, 0)
+#define HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET GENMASK(31, 16)
+#define HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET GENMASK(15, 0)
+
+enum htt_rx_filter_tlv_flags {
+    HTT_RX_FILTER_TLV_FLAGS_MPDU_START = BIT(0),
+    HTT_RX_FILTER_TLV_FLAGS_MSDU_START = BIT(1),
+    HTT_RX_FILTER_TLV_FLAGS_RX_PACKET = BIT(2),
+    HTT_RX_FILTER_TLV_FLAGS_MSDU_END = BIT(3),
+    HTT_RX_FILTER_TLV_FLAGS_MPDU_END = BIT(4),
+    HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER = BIT(5),
+    HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER = BIT(6),
+    HTT_RX_FILTER_TLV_FLAGS_ATTENTION = BIT(7),
+    HTT_RX_FILTER_TLV_FLAGS_PPDU_START = BIT(8),
+    HTT_RX_FILTER_TLV_FLAGS_PPDU_END = BIT(9),
+    HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS = BIT(10),
+    HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT = BIT(11),
+    HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE = BIT(12),
+};
+
+enum htt_rx_mgmt_pkt_filter_tlv_flags0 {
+    HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(0),
+    HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(1),
+    HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(2),
+    HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(3),
+    HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(4),
+    HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(5),
+    HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(6),
+
HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(7), + HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(8), + HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(9), + HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(10), + HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(11), + HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(12), + HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(13), + HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(14), + HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(15), + HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(16), + HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(17), + HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(18), + HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(19), + HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(20), + HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(21), + HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(22), + HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(23), + HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(24), + HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(25), + HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(26), + HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(27), + HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(28), + HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(29), +}; + +enum htt_rx_mgmt_pkt_filter_tlv_flags1 { + HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(0), + HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(1), + HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(2), + HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(3), + HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(4), + HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(5), + HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(6), + HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(7), + HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(8), + HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(9), + HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(10), + HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(11), + HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(12), + HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(13), + HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(14), + HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(15), + HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(16), + HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(17), +}; + +enum htt_rx_ctrl_pkt_filter_tlv_flags2 { + HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(0), + HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(1), + HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(2), + HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(3), + HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(4), + HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(5), + HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(6), + HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(7), + HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(8), + HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(9), + HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(10), + HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(11), + HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(12), + HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(13), + HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(14), + 
HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(15), + HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(16), + HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(17), + HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(18), + HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(19), + HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(20), + HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(21), + HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(22), + HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(23), + HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(24), + HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(25), + HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(26), + HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(27), + HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(28), + HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(29), +}; + +enum htt_rx_ctrl_pkt_filter_tlv_flags3 { + HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(0), + HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(1), + HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(2), + HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(3), + HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(4), + HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(5), + HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(6), + HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(7), + HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(8), + HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(9), + HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(10), + HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(11), + HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(12), + HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(13), + HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(14), + HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(15), + HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(16), + HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(17), +}; + +enum htt_rx_data_pkt_filter_tlv_flasg3 { + HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(18), + HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(19), + HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(20), + HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(21), + HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(22), + HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(23), + HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(24), + HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(25), + HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(26), +}; + +#define HTT_RX_FP_MGMT_FILTER_FLAGS0 \ + (HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \ + | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \ + | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \ + | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \ + | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \ + | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \ + | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \ + | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \ + | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM) + +#define HTT_RX_MD_MGMT_FILTER_FLAGS0 \ + (HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \ + | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \ + | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \ + | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \ + | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \ + | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \ + | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \ + | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \ + | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM) + 
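+/* Illustrative sketch, not part of the HTT interface definition: the FP/MD/MO
+ * combos above and below are intended to be loaded into struct
+ * htt_rx_ring_tlv_filter (defined later in this header) before the filter is
+ * pushed to the target in an HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG message,
+ * e.g. for a filter-pass-only monitor configuration:
+ *
+ *    struct htt_rx_ring_tlv_filter filter = {};
+ *
+ *    filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS;
+ *    filter.pkt_filter_flags0 = HTT_RX_MON_FP_MGMT_FILTER_FLAGS0;
+ *    filter.pkt_filter_flags1 = HTT_RX_MON_FP_MGMT_FILTER_FLAGS1;
+ *    filter.pkt_filter_flags2 = HTT_RX_MON_FP_CTRL_FILTER_FLASG2;
+ *    filter.pkt_filter_flags3 = HTT_RX_MON_FP_CTRL_FILTER_FLASG3 |
+ *                               HTT_RX_MON_FP_DATA_FILTER_FLASG3;
+ *
+ * The helper that builds and sends the resulting message is assumed to live
+ * in dp_tx.c and is not part of this hunk.
+ */
+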
+#define HTT_RX_MO_MGMT_FILTER_FLAGS0 \ + (HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \ + | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \ + | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \ + | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \ + | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \ + | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \ + | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \ + | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \ + | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM) + +#define HTT_RX_FP_MGMT_FILTER_FLAGS1 (HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \ + | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \ + | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \ + | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \ + | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK) + +#define HTT_RX_MD_MGMT_FILTER_FLAGS1 (HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \ + | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \ + | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \ + | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \ + | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK) + +#define HTT_RX_MO_MGMT_FILTER_FLAGS1 (HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \ + | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \ + | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \ + | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \ + | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK) + +#define HTT_RX_FP_CTRL_FILTER_FLASG2 (HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \ + | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \ + | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BA) + +#define HTT_RX_MD_CTRL_FILTER_FLASG2 (HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \ + | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \ + | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BA) + +#define HTT_RX_MO_CTRL_FILTER_FLASG2 (HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \ + | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \ + | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BA) + +#define HTT_RX_FP_CTRL_FILTER_FLASG3 (HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \ + | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \ + | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \ + | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \ + | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \ + | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK) + +#define HTT_RX_MD_CTRL_FILTER_FLASG3 (HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \ + | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \ + | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \ + | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \ + | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \ + | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK) + +#define HTT_RX_MO_CTRL_FILTER_FLASG3 (HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \ + | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \ + | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \ + | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \ + | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \ + | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK) + +#define HTT_RX_FP_DATA_FILTER_FLASG3 (HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST \ + | HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST \ + | HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA) + +#define HTT_RX_MD_DATA_FILTER_FLASG3 (HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_MCAST \ + | HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_UCAST \ + | HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA) + +#define HTT_RX_MO_DATA_FILTER_FLASG3 (HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_MCAST \ + | HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_UCAST \ + | HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA) + +#define HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 \ + 
(HTT_RX_FP_MGMT_FILTER_FLAGS0 | \ + HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7) + +#define HTT_RX_MON_MO_MGMT_FILTER_FLAGS0 \ + (HTT_RX_MO_MGMT_FILTER_FLAGS0 | \ + HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7) + +#define HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 \ + (HTT_RX_FP_MGMT_FILTER_FLAGS1 | \ + HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15) + +#define HTT_RX_MON_MO_MGMT_FILTER_FLAGS1 \ + (HTT_RX_MO_MGMT_FILTER_FLAGS1 | \ + HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15) + +#define HTT_RX_MON_FP_CTRL_FILTER_FLASG2 \ + (HTT_RX_FP_CTRL_FILTER_FLASG2 | \ + HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 | \ + HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 | \ + HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER | \ + HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 | \ + HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL | \ + HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP | \ + HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT) + +#define HTT_RX_MON_MO_CTRL_FILTER_FLASG2 \ + (HTT_RX_MO_CTRL_FILTER_FLASG2 | \ + HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 | \ + HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 | \ + HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER | \ + HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 | \ + HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL | \ + HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP | \ + HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT) + +#define HTT_RX_MON_FP_CTRL_FILTER_FLASG3 HTT_RX_FP_CTRL_FILTER_FLASG3 + +#define HTT_RX_MON_MO_CTRL_FILTER_FLASG3 HTT_RX_MO_CTRL_FILTER_FLASG3 + +#define HTT_RX_MON_FP_DATA_FILTER_FLASG3 HTT_RX_FP_DATA_FILTER_FLASG3 + +#define HTT_RX_MON_MO_DATA_FILTER_FLASG3 HTT_RX_MO_DATA_FILTER_FLASG3 + +#define HTT_RX_MON_FILTER_TLV_FLAGS \ + (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \ + HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \ + HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \ + HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \ + HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \ + HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE) + +#define HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING \ + (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \ + HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \ + HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \ + HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \ + HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \ + HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE) + +#define HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING \ + (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \ + HTT_RX_FILTER_TLV_FLAGS_MSDU_START | \ + HTT_RX_FILTER_TLV_FLAGS_RX_PACKET | \ + HTT_RX_FILTER_TLV_FLAGS_MSDU_END | \ + HTT_RX_FILTER_TLV_FLAGS_MPDU_END | \ + HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER | \ + HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER | \ + HTT_RX_FILTER_TLV_FLAGS_ATTENTION) + +/* msdu start. 
mpdu end, attention and rx hdr TLVs are not subscribed */
+#define HTT_RX_TLV_FLAGS_RXDMA_RING \
+    (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+     HTT_RX_FILTER_TLV_FLAGS_RX_PACKET | \
+     HTT_RX_FILTER_TLV_FLAGS_MSDU_END)
+
+struct htt_rx_ring_selection_cfg_cmd {
+    __le32 info0;
+    __le32 info1;
+    __le32 pkt_type_en_flags0;
+    __le32 pkt_type_en_flags1;
+    __le32 pkt_type_en_flags2;
+    __le32 pkt_type_en_flags3;
+    __le32 rx_filter_tlv;
+    __le32 rx_packet_offset;
+    __le32 rx_mpdu_offset;
+    __le32 rx_msdu_offset;
+    __le32 rx_attn_offset;
+} __packed;
+
+struct htt_rx_ring_tlv_filter {
+    u32 rx_filter; /* see htt_rx_filter_tlv_flags */
+    u32 pkt_filter_flags0; /* MGMT */
+    u32 pkt_filter_flags1; /* MGMT */
+    u32 pkt_filter_flags2; /* CTRL */
+    u32 pkt_filter_flags3; /* DATA */
+    bool offset_valid;
+    u16 rx_packet_offset;
+    u16 rx_header_offset;
+    u16 rx_mpdu_end_offset;
+    u16 rx_mpdu_start_offset;
+    u16 rx_msdu_end_offset;
+    u16 rx_msdu_start_offset;
+    u16 rx_attn_offset;
+};
+
+#define HTT_STATS_FRAME_CTRL_TYPE_MGMT 0x0
+#define HTT_STATS_FRAME_CTRL_TYPE_CTRL 0x1
+#define HTT_STATS_FRAME_CTRL_TYPE_DATA 0x2
+#define HTT_STATS_FRAME_CTRL_TYPE_RESV 0x3
+
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID GENMASK(15, 8)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_RING_ID GENMASK(23, 16)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_SS BIT(24)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PS BIT(25)
+
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO1_RING_BUFF_SIZE GENMASK(15, 0)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE GENMASK(18, 16)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT GENMASK(21, 19)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL GENMASK(24, 22)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA GENMASK(27, 25)
+
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG GENMASK(2, 0)
+
+struct htt_tx_ring_selection_cfg_cmd {
+    __le32 info0;
+    __le32 info1;
+    __le32 info2;
+    __le32 tlv_filter_mask_in0;
+    __le32 tlv_filter_mask_in1;
+    __le32 tlv_filter_mask_in2;
+    __le32 tlv_filter_mask_in3;
+    __le32 reserved[3];
+} __packed;
+
+#define HTT_TX_RING_TLV_FILTER_MGMT_DMA_LEN GENMASK(3, 0)
+#define HTT_TX_RING_TLV_FILTER_CTRL_DMA_LEN GENMASK(7, 4)
+#define HTT_TX_RING_TLV_FILTER_DATA_DMA_LEN GENMASK(11, 8)
+
+#define HTT_TX_MON_FILTER_HYBRID_MODE \
+    (HTT_TX_FILTER_TLV_FLAGS0_RESPONSE_START_STATUS | \
+     HTT_TX_FILTER_TLV_FLAGS0_RESPONSE_END_STATUS | \
+     HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START | \
+     HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_END | \
+     HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START_PPDU | \
+     HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_USER_PPDU | \
+     HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_ACK_OR_BA | \
+     HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_1K_BA | \
+     HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START_PROT | \
+     HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_PROT | \
+     HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_USER_RESPONSE | \
+     HTT_TX_FILTER_TLV_FLAGS0_RECEIVED_RESPONSE_INFO | \
+     HTT_TX_FILTER_TLV_FLAGS0_RECEIVED_RESPONSE_INFO_PART2)
+
+struct htt_tx_ring_tlv_filter {
+    u32 tx_mon_downstream_tlv_flags;
+    u32 tx_mon_upstream_tlv_flags0;
+    u32 tx_mon_upstream_tlv_flags1;
+    u32 tx_mon_upstream_tlv_flags2;
+    bool tx_mon_mgmt_filter;
+    bool tx_mon_data_filter;
+    bool tx_mon_ctrl_filter;
+    u16 tx_mon_pkt_dma_len;
+} __packed;
+
+enum
htt_tx_mon_upstream_tlv_flags0 { + HTT_TX_FILTER_TLV_FLAGS0_RESPONSE_START_STATUS = BIT(1), + HTT_TX_FILTER_TLV_FLAGS0_RESPONSE_END_STATUS = BIT(2), + HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START = BIT(3), + HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_END = BIT(4), + HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START_PPDU = BIT(5), + HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_USER_PPDU = BIT(6), + HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_ACK_OR_BA = BIT(7), + HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_1K_BA = BIT(8), + HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START_PROT = BIT(9), + HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_PROT = BIT(10), + HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_USER_RESPONSE = BIT(11), + HTT_TX_FILTER_TLV_FLAGS0_RX_FRAME_BITMAP_ACK = BIT(12), + HTT_TX_FILTER_TLV_FLAGS0_RX_FRAME_1K_BITMAP_ACK = BIT(13), + HTT_TX_FILTER_TLV_FLAGS0_COEX_TX_STATUS = BIT(14), + HTT_TX_FILTER_TLV_FLAGS0_RECEIVED_RESPONSE_INFO = BIT(15), + HTT_TX_FILTER_TLV_FLAGS0_RECEIVED_RESPONSE_INFO_PART2 = BIT(16), +}; + +#define HTT_TX_FILTER_TLV_FLAGS2_TXPCU_PHYTX_OTHER_TRANSMIT_INFO32 BIT(11) + +/* HTT message target->host */ + +enum htt_t2h_msg_type { + HTT_T2H_MSG_TYPE_VERSION_CONF, + HTT_T2H_MSG_TYPE_PEER_MAP = 0x3, + HTT_T2H_MSG_TYPE_PEER_UNMAP = 0x4, + HTT_T2H_MSG_TYPE_RX_ADDBA = 0x5, + HTT_T2H_MSG_TYPE_PKTLOG = 0x8, + HTT_T2H_MSG_TYPE_SEC_IND = 0xb, + HTT_T2H_MSG_TYPE_PEER_MAP2 = 0x1e, + HTT_T2H_MSG_TYPE_PEER_UNMAP2 = 0x1f, + HTT_T2H_MSG_TYPE_PPDU_STATS_IND = 0x1d, + HTT_T2H_MSG_TYPE_EXT_STATS_CONF = 0x1c, + HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND = 0x24, + HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND = 0x28, + HTT_T2H_MSG_TYPE_PEER_MAP3 = 0x2b, + HTT_T2H_MSG_TYPE_VDEV_TXRX_STATS_PERIODIC_IND = 0x2c, +}; + +#define HTT_TARGET_VERSION_MAJOR 3 + +#define HTT_T2H_MSG_TYPE GENMASK(7, 0) +#define HTT_T2H_VERSION_CONF_MINOR GENMASK(15, 8) +#define HTT_T2H_VERSION_CONF_MAJOR GENMASK(23, 16) + +struct htt_t2h_version_conf_msg { + __le32 version; +} __packed; + +#define HTT_T2H_PEER_MAP_INFO_VDEV_ID GENMASK(15, 8) +#define HTT_T2H_PEER_MAP_INFO_PEER_ID GENMASK(31, 16) +#define HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16 GENMASK(15, 0) +#define HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID GENMASK(31, 16) +#define HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL GENMASK(15, 0) +#define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_M BIT(16) +#define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_S 16 + +struct htt_t2h_peer_map_event { + __le32 info; + __le32 mac_addr_l32; + __le32 info1; + __le32 info2; +} __packed; + +#define HTT_T2H_PEER_UNMAP_INFO_VDEV_ID HTT_T2H_PEER_MAP_INFO_VDEV_ID +#define HTT_T2H_PEER_UNMAP_INFO_PEER_ID HTT_T2H_PEER_MAP_INFO_PEER_ID +#define HTT_T2H_PEER_UNMAP_INFO1_MAC_ADDR_H16 \ + HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16 +#define HTT_T2H_PEER_MAP_INFO1_NEXT_HOP_M HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_M +#define HTT_T2H_PEER_MAP_INFO1_NEXT_HOP_S HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_S + +struct htt_t2h_peer_unmap_event { + __le32 info; + __le32 mac_addr_l32; + __le32 info1; +} __packed; + +struct htt_resp_msg { + union { + struct htt_t2h_version_conf_msg version_msg; + struct htt_t2h_peer_map_event peer_map_ev; + struct htt_t2h_peer_unmap_event peer_unmap_ev; + }; +} __packed; + +#define HTT_VDEV_GET_STATS_U64(msg_l32, msg_u32)\ + (((u64)__le32_to_cpu(msg_u32) << 32) | (__le32_to_cpu(msg_l32))) +#define HTT_T2H_VDEV_STATS_PERIODIC_MSG_TYPE GENMASK(7, 0) +#define HTT_T2H_VDEV_STATS_PERIODIC_PDEV_ID GENMASK(15, 8) +#define HTT_T2H_VDEV_STATS_PERIODIC_NUM_VDEV GENMASK(23, 16) +#define HTT_T2H_VDEV_STATS_PERIODIC_PAYLOAD_BYTES GENMASK(15, 0) +#define HTT_VDEV_TXRX_STATS_COMMON_TLV 0 +#define 
HTT_VDEV_TXRX_STATS_HW_STATS_TLV 1 + +struct htt_t2h_vdev_txrx_stats_ind { + __le32 vdev_id; + __le32 rx_msdu_byte_cnt_lo; + __le32 rx_msdu_byte_cnt_hi; + __le32 rx_msdu_cnt_lo; + __le32 rx_msdu_cnt_hi; + __le32 tx_msdu_byte_cnt_lo; + __le32 tx_msdu_byte_cnt_hi; + __le32 tx_msdu_cnt_lo; + __le32 tx_msdu_cnt_hi; + __le32 tx_retry_cnt_lo; + __le32 tx_retry_cnt_hi; + __le32 tx_retry_byte_cnt_lo; + __le32 tx_retry_byte_cnt_hi; + __le32 tx_drop_cnt_lo; + __le32 tx_drop_cnt_hi; + __le32 tx_drop_byte_cnt_lo; + __le32 tx_drop_byte_cnt_hi; + __le32 msdu_ttl_cnt_lo; + __le32 msdu_ttl_cnt_hi; + __le32 msdu_ttl_byte_cnt_lo; + __le32 msdu_ttl_byte_cnt_hi; +} __packed; + +struct htt_t2h_vdev_common_stats_tlv { + __le32 soc_drop_count_lo; + __le32 soc_drop_count_hi; +} __packed; + +/* ppdu stats + * + * @details + * The following field definitions describe the format of the HTT target + * to host ppdu stats indication message. + * + * + * |31 16|15 12|11 10|9 8|7 0 | + * |----------------------------------------------------------------------| + * | payload_size | rsvd |pdev_id|mac_id | msg type | + * |----------------------------------------------------------------------| + * | ppdu_id | + * |----------------------------------------------------------------------| + * | Timestamp in us | + * |----------------------------------------------------------------------| + * | reserved | + * |----------------------------------------------------------------------| + * | type-specific stats info | + * | (see htt_ppdu_stats.h) | + * |----------------------------------------------------------------------| + * Header fields: + * - MSG_TYPE + * Bits 7:0 + * Purpose: Identifies this is a PPDU STATS indication + * message. + * Value: 0x1d + * - mac_id + * Bits 9:8 + * Purpose: mac_id of this ppdu_id + * Value: 0-3 + * - pdev_id + * Bits 11:10 + * Purpose: pdev_id of this ppdu_id + * Value: 0-3 + * 0 (for rings at SOC level), + * 1/2/3 PDEV -> 0/1/2 + * - payload_size + * Bits 31:16 + * Purpose: total tlv size + * Value: payload_size in bytes + */ + +#define HTT_T2H_PPDU_STATS_INFO_PDEV_ID GENMASK(11, 10) +#define HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE GENMASK(31, 16) + +struct ath12k_htt_ppdu_stats_msg { + __le32 info; + __le32 ppdu_id; + __le32 timestamp; + __le32 rsvd; + u8 data[]; +} __packed; + +struct htt_tlv { + __le32 header; + u8 value[]; +} __packed; + +#define HTT_TLV_TAG GENMASK(11, 0) +#define HTT_TLV_LEN GENMASK(23, 12) + +enum HTT_PPDU_STATS_BW { + HTT_PPDU_STATS_BANDWIDTH_5MHZ = 0, + HTT_PPDU_STATS_BANDWIDTH_10MHZ = 1, + HTT_PPDU_STATS_BANDWIDTH_20MHZ = 2, + HTT_PPDU_STATS_BANDWIDTH_40MHZ = 3, + HTT_PPDU_STATS_BANDWIDTH_80MHZ = 4, + HTT_PPDU_STATS_BANDWIDTH_160MHZ = 5, /* includes 80+80 */ + HTT_PPDU_STATS_BANDWIDTH_DYN = 6, +}; + +#define HTT_PPDU_STATS_CMN_FLAGS_FRAME_TYPE_M GENMASK(7, 0) +#define HTT_PPDU_STATS_CMN_FLAGS_QUEUE_TYPE_M GENMASK(15, 8) +/* bw - HTT_PPDU_STATS_BW */ +#define HTT_PPDU_STATS_CMN_FLAGS_BW_M GENMASK(19, 16) + +struct htt_ppdu_stats_common { + __le32 ppdu_id; + __le16 sched_cmdid; + u8 ring_id; + u8 num_users; + __le32 flags; /* %HTT_PPDU_STATS_COMMON_FLAGS_*/ + __le32 chain_mask; + __le32 fes_duration_us; /* frame exchange sequence */ + __le32 ppdu_sch_eval_start_tstmp_us; + __le32 ppdu_sch_end_tstmp_us; + __le32 ppdu_start_tstmp_us; + /* BIT [15 : 0] - phy mode (WLAN_PHY_MODE) with which ppdu was transmitted + * BIT [31 : 16] - bandwidth (in MHz) with which ppdu was transmitted + */ + __le16 phy_mode; + __le16 bw_mhz; +} __packed; + +enum htt_ppdu_stats_gi { + 
HTT_PPDU_STATS_SGI_0_8_US,
+    HTT_PPDU_STATS_SGI_0_4_US,
+    HTT_PPDU_STATS_SGI_1_6_US,
+    HTT_PPDU_STATS_SGI_3_2_US,
+};
+
+#define HTT_PPDU_STATS_USER_RATE_INFO0_USER_POS_M GENMASK(3, 0)
+#define HTT_PPDU_STATS_USER_RATE_INFO0_MU_GROUP_ID_M GENMASK(11, 4)
+
+enum HTT_PPDU_STATS_PPDU_TYPE {
+    HTT_PPDU_STATS_PPDU_TYPE_SU,
+    HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO,
+    HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA,
+    HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA,
+    HTT_PPDU_STATS_PPDU_TYPE_UL_TRIG,
+    HTT_PPDU_STATS_PPDU_TYPE_BURST_BCN,
+    HTT_PPDU_STATS_PPDU_TYPE_UL_BSR_RESP,
+    HTT_PPDU_STATS_PPDU_TYPE_UL_BSR_TRIG,
+    HTT_PPDU_STATS_PPDU_TYPE_UL_RESP,
+    HTT_PPDU_STATS_PPDU_TYPE_MAX
+};
+
+#define HTT_PPDU_STATS_USER_RATE_INFO1_RESP_TYPE_VALD_M BIT(0)
+#define HTT_PPDU_STATS_USER_RATE_INFO1_PPDU_TYPE_M GENMASK(5, 1)
+
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_LTF_SIZE_M GENMASK(1, 0)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_STBC_M BIT(2)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_HE_RE_M BIT(3)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_TXBF_M GENMASK(7, 4)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_BW_M GENMASK(11, 8)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_NSS_M GENMASK(15, 12)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_MCS_M GENMASK(19, 16)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_PREAMBLE_M GENMASK(23, 20)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_GI_M GENMASK(27, 24)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_DCM_M BIT(28)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_LDPC_M BIT(29)
+
+#define HTT_USR_RATE_PREAMBLE(_val) \
+    le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_PREAMBLE_M)
+#define HTT_USR_RATE_BW(_val) \
+    le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_BW_M)
+#define HTT_USR_RATE_NSS(_val) \
+    le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_NSS_M)
+#define HTT_USR_RATE_MCS(_val) \
+    le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_MCS_M)
+#define HTT_USR_RATE_GI(_val) \
+    le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_GI_M)
+#define HTT_USR_RATE_DCM(_val) \
+    le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_DCM_M)
+
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_LTF_SIZE_M GENMASK(1, 0)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_STBC_M BIT(2)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_HE_RE_M BIT(3)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_TXBF_M GENMASK(7, 4)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_BW_M GENMASK(11, 8)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_NSS_M GENMASK(15, 12)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_MCS_M GENMASK(19, 16)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_PREAMBLE_M GENMASK(23, 20)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_GI_M GENMASK(27, 24)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_DCM_M BIT(28)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_LDPC_M BIT(29)
+
+struct htt_ppdu_stats_user_rate {
+    u8 tid_num;
+    u8 reserved0;
+    __le16 sw_peer_id;
+    __le32 info0; /* %HTT_PPDU_STATS_USER_RATE_INFO0_*/
+    __le16 ru_end;
+    __le16 ru_start;
+    __le16 resp_ru_end;
+    __le16 resp_ru_start;
+    __le32 info1; /* %HTT_PPDU_STATS_USER_RATE_INFO1_ */
+    __le32 rate_flags; /* %HTT_PPDU_STATS_USER_RATE_FLAGS_ */
+    /* Note: resp_rate_flags is only valid if resp_type is UL */
+    __le32 resp_rate_flags; /* %HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_ */
+} __packed;
+
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_RATECODE_M GENMASK(7, 0)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_IS_AMPDU_M BIT(8)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_BA_ACK_FAILED_M GENMASK(10, 9)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_BW_M GENMASK(13, 11)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_SGI_M BIT(14)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M GENMASK(31, 16)
+
+#define HTT_TX_INFO_IS_AMSDU(_flags) \
+    u32_get_bits(_flags, HTT_PPDU_STATS_TX_INFO_FLAGS_IS_AMPDU_M)
+#define HTT_TX_INFO_BA_ACK_FAILED(_flags) \
+    u32_get_bits(_flags, HTT_PPDU_STATS_TX_INFO_FLAGS_BA_ACK_FAILED_M)
+#define HTT_TX_INFO_RATECODE(_flags) \
+    u32_get_bits(_flags, HTT_PPDU_STATS_TX_INFO_FLAGS_RATECODE_M)
+#define HTT_TX_INFO_PEERID(_flags) \
+    u32_get_bits(_flags, HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M)
+
+struct htt_tx_ppdu_stats_info {
+    struct htt_tlv tlv_hdr;
+    __le32 tx_success_bytes;
+    __le32 tx_retry_bytes;
+    __le32 tx_failed_bytes;
+    __le32 flags; /* %HTT_PPDU_STATS_TX_INFO_FLAGS_ */
+    __le16 tx_success_msdus;
+    __le16 tx_retry_msdus;
+    __le16 tx_failed_msdus;
+    __le16 tx_duration; /* in us */
+} __packed;
+
+enum htt_ppdu_stats_usr_compln_status {
+    HTT_PPDU_STATS_USER_STATUS_OK,
+    HTT_PPDU_STATS_USER_STATUS_FILTERED,
+    HTT_PPDU_STATS_USER_STATUS_RESP_TIMEOUT,
+    HTT_PPDU_STATS_USER_STATUS_RESP_MISMATCH,
+    HTT_PPDU_STATS_USER_STATUS_ABORT,
+};
+
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_LONG_RETRY_M GENMASK(3, 0)
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_SHORT_RETRY_M GENMASK(7, 4)
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_IS_AMPDU_M BIT(8)
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_RESP_TYPE_M GENMASK(12, 9)
+
+#define HTT_USR_CMPLTN_IS_AMPDU(_val) \
+    le32_get_bits(_val, HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_IS_AMPDU_M)
+#define HTT_USR_CMPLTN_LONG_RETRY(_val) \
+    le32_get_bits(_val, HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_LONG_RETRY_M)
+#define HTT_USR_CMPLTN_SHORT_RETRY(_val) \
+    le32_get_bits(_val, HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_SHORT_RETRY_M)
+
+struct htt_ppdu_stats_usr_cmpltn_cmn {
+    u8 status;
+    u8 tid_num;
+    __le16 sw_peer_id;
+    /* RSSI value of last ack packet (units = dB above noise floor) */
+    __le32 ack_rssi;
+    __le16 mpdu_tried;
+    __le16 mpdu_success;
+    __le32 flags; /* %HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_ */
+} __packed;
+
+#define HTT_PPDU_STATS_ACK_BA_INFO_NUM_MPDU_M GENMASK(8, 0)
+#define HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M GENMASK(24, 9)
+#define HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM GENMASK(31, 25)
+
+#define HTT_PPDU_STATS_NON_QOS_TID 16
+
+struct htt_ppdu_stats_usr_cmpltn_ack_ba_status {
+    __le32 ppdu_id;
+    __le16 sw_peer_id;
+    __le16 reserved0;
+    __le32 info; /* %HTT_PPDU_STATS_USR_CMPLTN_CMN_INFO_ */
+    __le16 current_seq;
+    __le16 start_seq;
+    __le32 success_bytes;
+} __packed;
+
+struct htt_ppdu_user_stats {
+    u16 peer_id;
+    u16 delay_ba;
+    u32 tlv_flags;
+    bool is_valid_peer_id;
+    struct htt_ppdu_stats_user_rate rate;
+    struct htt_ppdu_stats_usr_cmpltn_cmn cmpltn_cmn;
+    struct htt_ppdu_stats_usr_cmpltn_ack_ba_status ack_ba;
+};
+
+#define HTT_PPDU_STATS_MAX_USERS 8
+#define HTT_PPDU_DESC_MAX_DEPTH 16
+
+struct htt_ppdu_stats {
+    struct htt_ppdu_stats_common common;
+    struct htt_ppdu_user_stats user_stats[HTT_PPDU_STATS_MAX_USERS];
+};
+
+struct htt_ppdu_stats_info {
+    u32 tlv_bitmap;
+    u32 ppdu_id;
+    u32 frame_type;
+    u32 frame_ctrl;
+    u32 delay_ba;
+    u32 bar_num_users;
+    struct htt_ppdu_stats ppdu_stats;
+    struct list_head list;
+};
+
+/* @brief target -> host MLO offset indication message
+ *
+ * @details
+ * The following field definitions describe the format of the HTT target
+ * to host mlo offset indication message.
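+ *
+ * (Illustrative note: the 64-bit quantities are delivered as lo/hi pairs;
+ * a host reassembles them along the lines of
+ *
+ *    offset = ((u64)__le32_to_cpu(msg->mlo_offset_hi) << 32) |
+ *             __le32_to_cpu(msg->mlo_offset_lo);
+ *
+ * where msg points to the struct ath12k_htt_mlo_offset_msg defined below.)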
+ * + * + * |31 29|28 |26|25 22|21 16|15 13|12 10 |9 8|7 0| + * |---------------------------------------------------------------------| + * | rsvd1 | mac_freq |chip_id |pdev_id|msgtype| + * |---------------------------------------------------------------------| + * | sync_timestamp_lo_us | + * |---------------------------------------------------------------------| + * | sync_timestamp_hi_us | + * |---------------------------------------------------------------------| + * | mlo_offset_lo | + * |---------------------------------------------------------------------| + * | mlo_offset_hi | + * |---------------------------------------------------------------------| + * | mlo_offset_clcks | + * |---------------------------------------------------------------------| + * | rsvd2 | mlo_comp_clks |mlo_comp_us | + * |---------------------------------------------------------------------| + * | rsvd3 |mlo_comp_timer | + * |---------------------------------------------------------------------| + * Header fields + * - MSG_TYPE + * Bits 7:0 + * Purpose: Identifies this is a MLO offset indication msg + * - PDEV_ID + * Bits 9:8 + * Purpose: Pdev of this MLO offset + * - CHIP_ID + * Bits 12:10 + * Purpose: chip_id of this MLO offset + * - MAC_FREQ + * Bits 28:13 + * Purpose: clock frequency of the mac HW block in MHz + * - SYNC_TIMESTAMP_LO_US + * Bits: 31:0 + * Purpose: lower 32 bits of the WLAN global time stamp at which + * last sync interrupt was received + * - SYNC_TIMESTAMP_HI_US + * Bits: 31:0 + * Purpose: upper 32 bits of WLAN global time stamp at which + * last sync interrupt was received + * - MLO_OFFSET_LO + * Bits: 31:0 + * Purpose: lower 32 bits of the MLO offset in us + * - MLO_OFFSET_HI + * Bits: 31:0 + * Purpose: upper 32 bits of the MLO offset in us + * - MLO_COMP_US + * Bits: 15:0 + * Purpose: MLO time stamp compensation applied in us + * - MLO_COMP_CLCKS + * Bits: 25:16 + * Purpose: MLO time stamp compensation applied in clock ticks + * - MLO_COMP_TIMER + * Bits: 21:0 + * Purpose: Periodic timer at which compensation is applied + */ + +#define HTT_T2H_MLO_OFFSET_INFO_MSG_TYPE GENMASK(7, 0) +#define HTT_T2H_MLO_OFFSET_INFO_PDEV_ID GENMASK(9, 8) + +struct ath12k_htt_mlo_offset_msg { + __le32 info; + __le32 sync_timestamp_lo_us; + __le32 sync_timestamp_hi_us; + __le32 mlo_offset_hi; + __le32 mlo_offset_lo; + __le32 mlo_offset_clks; + __le32 mlo_comp_clks; + __le32 mlo_comp_timer; +} __packed; + +/* @brief host -> target FW extended statistics retrieve + * + * @details + * The following field definitions describe the format of the HTT host + * to target FW extended stats retrieve message. + * The message specifies the type of stats the host wants to retrieve. 
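+ * For instance, a request is built by filling struct + * htt_ext_stats_cfg_cmd (defined below) with msg_type = 0x10, a stats + * type from htt_stats.h, a pdev_mask selecting pdevs (e.g. BIT(1) for + * the first pdev per the encoding below) and an opaque 64-bit cookie + * that the target echoes back in its confirmation.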
+ * + * |31 24|23 16|15 8|7 0| + * |-----------------------------------------------------------| + * | reserved | stats type | pdev_mask | msg type | + * |-----------------------------------------------------------| + * | config param [0] | + * |-----------------------------------------------------------| + * | config param [1] | + * |-----------------------------------------------------------| + * | config param [2] | + * |-----------------------------------------------------------| + * | config param [3] | + * |-----------------------------------------------------------| + * | reserved | + * |-----------------------------------------------------------| + * | cookie LSBs | + * |-----------------------------------------------------------| + * | cookie MSBs | + * |-----------------------------------------------------------| + * Header fields: + * - MSG_TYPE + * Bits 7:0 + * Purpose: identifies this is an extended stats upload request message + * Value: 0x10 + * - PDEV_MASK + * Bits 15:8 + * Purpose: identifies the mask of PDEVs to retrieve stats from + * Value: This is an overloaded field, refer to usage and interpretation of + * PDEV in interface document. + * Bit 8 : Reserved for SOC stats + * Bits 9 - 15 : Indicate PDEV_MASK in DBDC + * Indicate MACID_MASK in DBS + * - STATS_TYPE + * Bits 23:16 + * Purpose: identifies which FW statistics to upload + * Value: Defined by htt_dbg_ext_stats_type (see htt_stats.h) + * - Reserved + * Bits 31:24 + * - CONFIG_PARAM [0] + * Bits 31:0 + * Purpose: give an opaque configuration value to the specified stats type + * Value: stats-type specific configuration value + * Refer to htt_stats.h for interpretation for each stats sub_type + * - CONFIG_PARAM [1] + * Bits 31:0 + * Purpose: give an opaque configuration value to the specified stats type + * Value: stats-type specific configuration value + * Refer to htt_stats.h for interpretation for each stats sub_type + * - CONFIG_PARAM [2] + * Bits 31:0 + * Purpose: give an opaque configuration value to the specified stats type + * Value: stats-type specific configuration value + * Refer to htt_stats.h for interpretation for each stats sub_type + * - CONFIG_PARAM [3] + * Bits 31:0 + * Purpose: give an opaque configuration value to the specified stats type + * Value: stats-type specific configuration value + * Refer to htt_stats.h for interpretation for each stats sub_type + * - Reserved [31:0] for future use. + * - COOKIE_LSBS + * Bits 31:0 + * Purpose: Provide a mechanism to match a target->host stats confirmation + * message with its preceding host->target stats request message. + * Value: LSBs of the opaque cookie specified by the host-side requestor + * - COOKIE_MSBS + * Bits 31:0 + * Purpose: Provide a mechanism to match a target->host stats confirmation + * message with its preceding host->target stats request message. 
+ * Value: MSBs of the opaque cookie specified by the host-side requestor + */ + +struct htt_ext_stats_cfg_hdr { + u8 msg_type; + u8 pdev_mask; + u8 stats_type; + u8 reserved; +} __packed; + +struct htt_ext_stats_cfg_cmd { + struct htt_ext_stats_cfg_hdr hdr; + __le32 cfg_param0; + __le32 cfg_param1; + __le32 cfg_param2; + __le32 cfg_param3; + __le32 reserved; + __le32 cookie_lsb; + __le32 cookie_msb; +} __packed; + +/* htt stats config default params */ +#define HTT_STAT_DEFAULT_RESET_START_OFFSET 0 +#define HTT_STAT_DEFAULT_CFG0_ALL_HWQS 0xffffffff +#define HTT_STAT_DEFAULT_CFG0_ALL_TXQS 0xffffffff +#define HTT_STAT_DEFAULT_CFG0_ALL_CMDQS 0xffff +#define HTT_STAT_DEFAULT_CFG0_ALL_RINGS 0xffff +#define HTT_STAT_DEFAULT_CFG0_ACTIVE_PEERS 0xff +#define HTT_STAT_DEFAULT_CFG0_CCA_CUMULATIVE 0x00 +#define HTT_STAT_DEFAULT_CFG0_ACTIVE_VDEVS 0x00 + +/* HTT_DBG_EXT_STATS_PEER_INFO + * PARAMS: + * @config_param0: + * [Bit0] - [0] for sw_peer_id, [1] for mac_addr based request + * [Bit15 : Bit 1] htt_peer_stats_req_mode_t + * [Bit31 : Bit16] sw_peer_id + * @config_param1: + * peer_stats_req_type_mask:32 (enum htt_peer_stats_tlv_enum) + * 0 bit htt_peer_stats_cmn_tlv + * 1 bit htt_peer_details_tlv + * 2 bit htt_tx_peer_rate_stats_tlv + * 3 bit htt_rx_peer_rate_stats_tlv + * 4 bit htt_tx_tid_stats_tlv/htt_tx_tid_stats_v1_tlv + * 5 bit htt_rx_tid_stats_tlv + * 6 bit htt_msdu_flow_stats_tlv + * @config_param2: [Bit31 : Bit0] mac_addr31to0 + * @config_param3: [Bit15 : Bit0] mac_addr47to32 + * [Bit31 : Bit16] reserved + */ +#define HTT_STAT_PEER_INFO_MAC_ADDR BIT(0) +#define HTT_STAT_DEFAULT_PEER_REQ_TYPE 0x7f + +/* Used to set different configs to the specified stats type.*/ +struct htt_ext_stats_cfg_params { + u32 cfg0; + u32 cfg1; + u32 cfg2; + u32 cfg3; +}; + +enum vdev_stats_offload_timer_duration { + ATH12K_STATS_TIMER_DUR_500MS = 1, + ATH12K_STATS_TIMER_DUR_1SEC = 2, + ATH12K_STATS_TIMER_DUR_2SEC = 3, +}; + +static inline void ath12k_dp_get_mac_addr(u32 addr_l32, u16 addr_h16, u8 *addr) +{ + memcpy(addr, &addr_l32, 4); + memcpy(addr + 4, &addr_h16, ETH_ALEN - 4); +} + +int ath12k_dp_service_srng(struct ath12k_base *ab, + struct ath12k_ext_irq_grp *irq_grp, + int budget); +int ath12k_dp_htt_connect(struct ath12k_dp *dp); +void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_vif *arvif); +void ath12k_dp_free(struct ath12k_base *ab); +int ath12k_dp_alloc(struct ath12k_base *ab); +void ath12k_dp_cc_config(struct ath12k_base *ab); +int ath12k_dp_pdev_alloc(struct ath12k_base *ab); +void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab); +void ath12k_dp_pdev_free(struct ath12k_base *ab); +int ath12k_dp_tx_htt_srng_setup(struct ath12k_base *ab, u32 ring_id, + int mac_id, enum hal_ring_type ring_type); +int ath12k_dp_peer_setup(struct ath12k *ar, int vdev_id, const u8 *addr); +void ath12k_dp_peer_cleanup(struct ath12k *ar, int vdev_id, const u8 *addr); +void ath12k_dp_srng_cleanup(struct ath12k_base *ab, struct dp_srng *ring); +int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring, + enum hal_ring_type type, int ring_num, + int mac_id, int num_entries); +void ath12k_dp_link_desc_cleanup(struct ath12k_base *ab, + struct dp_link_desc_bank *desc_bank, + u32 ring_type, struct dp_srng *ring); +int ath12k_dp_link_desc_setup(struct ath12k_base *ab, + struct dp_link_desc_bank *link_desc_banks, + u32 ring_type, struct hal_srng *srng, + u32 n_link_desc); +struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab, + u32 cookie); +struct ath12k_tx_desc_info 
*ath12k_dp_get_tx_desc(struct ath12k_base *ab, + u32 desc_id); +#endif diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c new file mode 100644 index 000000000000..a214797c96a2 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/dp_mon.c @@ -0,0 +1,2596 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include "dp_mon.h" +#include "debug.h" +#include "dp_rx.h" +#include "dp_tx.h" +#include "peer.h" + +static void ath12k_dp_mon_rx_handle_ofdma_info(void *rx_tlv, + struct hal_rx_user_status *rx_user_status) +{ + struct hal_rx_ppdu_end_user_stats *ppdu_end_user = + (struct hal_rx_ppdu_end_user_stats *)rx_tlv; + + rx_user_status->ul_ofdma_user_v0_word0 = + __le32_to_cpu(ppdu_end_user->usr_resp_ref); + rx_user_status->ul_ofdma_user_v0_word1 = + __le32_to_cpu(ppdu_end_user->usr_resp_ref_ext); +} + +static void +ath12k_dp_mon_rx_populate_byte_count(void *rx_tlv, void *ppduinfo, + struct hal_rx_user_status *rx_user_status) +{ + struct hal_rx_ppdu_end_user_stats *ppdu_end_user = + (struct hal_rx_ppdu_end_user_stats *)rx_tlv; + u32 mpdu_ok_byte_count = __le32_to_cpu(ppdu_end_user->mpdu_ok_cnt); + u32 mpdu_err_byte_count = __le32_to_cpu(ppdu_end_user->mpdu_err_cnt); + + rx_user_status->mpdu_ok_byte_count = + u32_get_bits(mpdu_ok_byte_count, + HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_OK_BYTE_COUNT); + rx_user_status->mpdu_err_byte_count = + u32_get_bits(mpdu_err_byte_count, + HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_ERR_BYTE_COUNT); +} + +static void +ath12k_dp_mon_rx_populate_mu_user_info(void *rx_tlv, + struct hal_rx_mon_ppdu_info *ppdu_info, + struct hal_rx_user_status *rx_user_status) +{ + rx_user_status->ast_index = ppdu_info->ast_index; + rx_user_status->tid = ppdu_info->tid; + rx_user_status->tcp_ack_msdu_count = + ppdu_info->tcp_ack_msdu_count; + rx_user_status->tcp_msdu_count = + ppdu_info->tcp_msdu_count; + rx_user_status->udp_msdu_count = + ppdu_info->udp_msdu_count; + rx_user_status->other_msdu_count = + ppdu_info->other_msdu_count; + rx_user_status->frame_control = ppdu_info->frame_control; + rx_user_status->frame_control_info_valid = + ppdu_info->frame_control_info_valid; + rx_user_status->data_sequence_control_info_valid = + ppdu_info->data_sequence_control_info_valid; + rx_user_status->first_data_seq_ctrl = + ppdu_info->first_data_seq_ctrl; + rx_user_status->preamble_type = ppdu_info->preamble_type; + rx_user_status->ht_flags = ppdu_info->ht_flags; + rx_user_status->vht_flags = ppdu_info->vht_flags; + rx_user_status->he_flags = ppdu_info->he_flags; + rx_user_status->rs_flags = ppdu_info->rs_flags; + + rx_user_status->mpdu_cnt_fcs_ok = + ppdu_info->num_mpdu_fcs_ok; + rx_user_status->mpdu_cnt_fcs_err = + ppdu_info->num_mpdu_fcs_err; + memcpy(&rx_user_status->mpdu_fcs_ok_bitmap[0], &ppdu_info->mpdu_fcs_ok_bitmap[0], + HAL_RX_NUM_WORDS_PER_PPDU_BITMAP * + sizeof(ppdu_info->mpdu_fcs_ok_bitmap[0])); + + ath12k_dp_mon_rx_populate_byte_count(rx_tlv, ppdu_info, rx_user_status); +} + +static void ath12k_dp_mon_parse_vht_sig_a(u8 *tlv_data, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + struct hal_rx_vht_sig_a_info *vht_sig = + (struct hal_rx_vht_sig_a_info *)tlv_data; + u32 nsts, group_id, info0, info1; + u8 gi_setting; + + info0 = __le32_to_cpu(vht_sig->info0); + info1 = __le32_to_cpu(vht_sig->info1); + + ppdu_info->ldpc = u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING); + 
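/* In VHT-SIG-A2 the SU/MU coding field doubles as the LDPC indication + * for SU PPDUs, hence it is stored as ldpc above. + */ + 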
ppdu_info->mcs = u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_MCS); + gi_setting = u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_GI_SETTING); + switch (gi_setting) { + case HAL_RX_VHT_SIG_A_NORMAL_GI: + ppdu_info->gi = HAL_RX_GI_0_8_US; + break; + case HAL_RX_VHT_SIG_A_SHORT_GI: + case HAL_RX_VHT_SIG_A_SHORT_GI_AMBIGUITY: + ppdu_info->gi = HAL_RX_GI_0_4_US; + break; + } + + ppdu_info->is_stbc = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_STBC); + nsts = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_NSTS); + if (ppdu_info->is_stbc && nsts > 0) + nsts = ((nsts + 1) >> 1) - 1; + + ppdu_info->nss = u32_get_bits(nsts, VHT_SIG_SU_NSS_MASK); + ppdu_info->bw = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_BW); + ppdu_info->beamformed = u32_get_bits(info1, + HAL_RX_VHT_SIG_A_INFO_INFO1_BEAMFORMED); + group_id = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID); + if (group_id == 0 || group_id == 63) + ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; + else + ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO; + ppdu_info->vht_flag_values5 = group_id; + ppdu_info->vht_flag_values3[0] = (((ppdu_info->mcs) << 4) | + ppdu_info->nss); + ppdu_info->vht_flag_values2 = ppdu_info->bw; + ppdu_info->vht_flag_values4 = + u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING); +} + +static void ath12k_dp_mon_parse_ht_sig(u8 *tlv_data, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + struct hal_rx_ht_sig_info *ht_sig = + (struct hal_rx_ht_sig_info *)tlv_data; + u32 info0 = __le32_to_cpu(ht_sig->info0); + u32 info1 = __le32_to_cpu(ht_sig->info1); + + ppdu_info->mcs = u32_get_bits(info0, HAL_RX_HT_SIG_INFO_INFO0_MCS); + ppdu_info->bw = u32_get_bits(info0, HAL_RX_HT_SIG_INFO_INFO0_BW); + ppdu_info->is_stbc = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_STBC); + ppdu_info->ldpc = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_FEC_CODING); + ppdu_info->gi = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_GI); + ppdu_info->nss = (ppdu_info->mcs >> 3); + ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; +} + +static void ath12k_dp_mon_parse_l_sig_b(u8 *tlv_data, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + struct hal_rx_lsig_b_info *lsigb = + (struct hal_rx_lsig_b_info *)tlv_data; + u32 info0 = __le32_to_cpu(lsigb->info0); + u8 rate; + + rate = u32_get_bits(info0, HAL_RX_LSIG_B_INFO_INFO0_RATE); + switch (rate) { + case 1: + rate = HAL_RX_LEGACY_RATE_1_MBPS; + break; + case 2: + case 5: + rate = HAL_RX_LEGACY_RATE_2_MBPS; + break; + case 3: + case 6: + rate = HAL_RX_LEGACY_RATE_5_5_MBPS; + break; + case 4: + case 7: + rate = HAL_RX_LEGACY_RATE_11_MBPS; + break; + default: + rate = HAL_RX_LEGACY_RATE_INVALID; + } + + ppdu_info->rate = rate; + ppdu_info->cck_flag = 1; + ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; +} + +static void ath12k_dp_mon_parse_l_sig_a(u8 *tlv_data, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + struct hal_rx_lsig_a_info *lsiga = + (struct hal_rx_lsig_a_info *)tlv_data; + u32 info0 = __le32_to_cpu(lsiga->info0); + u8 rate; + + rate = u32_get_bits(info0, HAL_RX_LSIG_A_INFO_INFO0_RATE); + switch (rate) { + case 8: + rate = HAL_RX_LEGACY_RATE_48_MBPS; + break; + case 9: + rate = HAL_RX_LEGACY_RATE_24_MBPS; + break; + case 10: + rate = HAL_RX_LEGACY_RATE_12_MBPS; + break; + case 11: + rate = HAL_RX_LEGACY_RATE_6_MBPS; + break; + case 12: + rate = HAL_RX_LEGACY_RATE_54_MBPS; + break; + case 13: + rate = HAL_RX_LEGACY_RATE_36_MBPS; + break; + case 14: + rate = HAL_RX_LEGACY_RATE_18_MBPS; + break; + case 15: + rate = HAL_RX_LEGACY_RATE_9_MBPS; + break; + 
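/* L-SIG rate codes 0-7 do not map to a legacy OFDM rate */ + 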
default: + rate = HAL_RX_LEGACY_RATE_INVALID; + } + + ppdu_info->rate = rate; + ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; +} + +static void ath12k_dp_mon_parse_he_sig_b2_ofdma(u8 *tlv_data, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + struct hal_rx_he_sig_b2_ofdma_info *he_sig_b2_ofdma = + (struct hal_rx_he_sig_b2_ofdma_info *)tlv_data; + u32 info0, value; + + info0 = __le32_to_cpu(he_sig_b2_ofdma->info0); + + ppdu_info->he_data1 |= HE_MCS_KNOWN | HE_DCM_KNOWN | HE_CODING_KNOWN; + + /* HE-data2 */ + ppdu_info->he_data2 |= HE_TXBF_KNOWN; + + ppdu_info->mcs = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS); + value = ppdu_info->mcs << HE_TRANSMIT_MCS_SHIFT; + ppdu_info->he_data3 |= value; + + value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM); + value = value << HE_DCM_SHIFT; + ppdu_info->he_data3 |= value; + + value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING); + ppdu_info->ldpc = value; + value = value << HE_CODING_SHIFT; + ppdu_info->he_data3 |= value; + + /* HE-data4 */ + value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID); + value = value << HE_STA_ID_SHIFT; + ppdu_info->he_data4 |= value; + + ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS); + ppdu_info->beamformed = u32_get_bits(info0, + HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF); + ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA; +} + +static void ath12k_dp_mon_parse_he_sig_b2_mu(u8 *tlv_data, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + struct hal_rx_he_sig_b2_mu_info *he_sig_b2_mu = + (struct hal_rx_he_sig_b2_mu_info *)tlv_data; + u32 info0, value; + + info0 = __le32_to_cpu(he_sig_b2_mu->info0); + + ppdu_info->he_data1 |= HE_MCS_KNOWN | HE_CODING_KNOWN; + + ppdu_info->mcs = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS); + value = ppdu_info->mcs << HE_TRANSMIT_MCS_SHIFT; + ppdu_info->he_data3 |= value; + + value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING); + ppdu_info->ldpc = value; + value = value << HE_CODING_SHIFT; + ppdu_info->he_data3 |= value; + + value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_ID); + value = value << HE_STA_ID_SHIFT; + ppdu_info->he_data4 |= value; + + ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS); +} + +static void ath12k_dp_mon_parse_he_sig_b1_mu(u8 *tlv_data, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + struct hal_rx_he_sig_b1_mu_info *he_sig_b1_mu = + (struct hal_rx_he_sig_b1_mu_info *)tlv_data; + u32 info0 = __le32_to_cpu(he_sig_b1_mu->info0); + u16 ru_tones; + + ru_tones = u32_get_bits(info0, + HAL_RX_HE_SIG_B1_MU_INFO_INFO0_RU_ALLOCATION); + ppdu_info->ru_alloc = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones); + ppdu_info->he_RU[0] = ru_tones; + ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO; +} + +static void ath12k_dp_mon_parse_he_sig_mu(u8 *tlv_data, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl = + (struct hal_rx_he_sig_a_mu_dl_info *)tlv_data; + u32 info0, info1, value; + u16 he_gi = 0, he_ltf = 0; + + info0 = __le32_to_cpu(he_sig_a_mu_dl->info0); + info1 = __le32_to_cpu(he_sig_a_mu_dl->info1); + + ppdu_info->he_mu_flags = 1; + + ppdu_info->he_data1 = HE_MU_FORMAT_TYPE; + ppdu_info->he_data1 |= + HE_BSS_COLOR_KNOWN | + HE_DL_UL_KNOWN | + HE_LDPC_EXTRA_SYMBOL_KNOWN | + HE_STBC_KNOWN | + HE_DATA_BW_RU_KNOWN | + HE_DOPPLER_KNOWN; + + ppdu_info->he_data2 = + HE_GI_KNOWN | + HE_LTF_SYMBOLS_KNOWN | + 
HE_PRE_FEC_PADDING_KNOWN | + HE_PE_DISAMBIGUITY_KNOWN | + HE_TXOP_KNOWN | + HE_MIDABLE_PERIODICITY_KNOWN; + + /* data3 */ + ppdu_info->he_data3 = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_BSS_COLOR); + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_UL_FLAG); + value = value << HE_DL_UL_SHIFT; + ppdu_info->he_data3 |= value; + + value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_LDPC_EXTRA); + value = value << HE_LDPC_EXTRA_SYMBOL_SHIFT; + ppdu_info->he_data3 |= value; + + value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_STBC); + value = value << HE_STBC_SHIFT; + ppdu_info->he_data3 |= value; + + /* data4 */ + value = u32_get_bits(info0, + HAL_RX_HE_SIG_A_MU_DL_INFO0_SPATIAL_REUSE); + ppdu_info->he_data4 = value; + + /* data5 */ + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_TRANSMIT_BW); + ppdu_info->he_data5 = value; + ppdu_info->bw = value; + + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_CP_LTF_SIZE); + switch (value) { + case 0: + he_gi = HE_GI_0_8; + he_ltf = HE_LTF_4_X; + break; + case 1: + he_gi = HE_GI_0_8; + he_ltf = HE_LTF_2_X; + break; + case 2: + he_gi = HE_GI_1_6; + he_ltf = HE_LTF_2_X; + break; + case 3: + he_gi = HE_GI_3_2; + he_ltf = HE_LTF_4_X; + break; + } + + ppdu_info->gi = he_gi; + value = he_gi << HE_GI_SHIFT; + ppdu_info->he_data5 |= value; + + value = he_ltf << HE_LTF_SIZE_SHIFT; + ppdu_info->he_data5 |= value; + + value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_NUM_LTF_SYMB); + value = (value << HE_LTF_SYM_SHIFT); + ppdu_info->he_data5 |= value; + + value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_FACTOR); + value = value << HE_PRE_FEC_PAD_SHIFT; + ppdu_info->he_data5 |= value; + + value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_PE_DISAM); + value = value << HE_PE_DISAMBIGUITY_SHIFT; + ppdu_info->he_data5 |= value; + + /* data6 */ + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_DOPPLER_INDICATION); + value = value << HE_DOPPLER_SHIFT; + ppdu_info->he_data6 |= value; + + value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_TXOP_DURATION); + value = value << HE_TXOP_SHIFT; + ppdu_info->he_data6 |= value; + + /* HE-MU Flags */ + /* HE-MU-flags1 */ + ppdu_info->he_flags1 = + HE_SIG_B_MCS_KNOWN | + HE_SIG_B_DCM_KNOWN | + HE_SIG_B_COMPRESSION_FLAG_1_KNOWN | + HE_SIG_B_SYM_NUM_KNOWN | + HE_RU_0_KNOWN; + + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_MCS_OF_SIGB); + ppdu_info->he_flags1 |= value; + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_DCM_OF_SIGB); + value = value << HE_DCM_FLAG_1_SHIFT; + ppdu_info->he_flags1 |= value; + + /* HE-MU-flags2 */ + ppdu_info->he_flags2 = HE_BW_KNOWN; + + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_TRANSMIT_BW); + ppdu_info->he_flags2 |= value; + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_COMP_MODE_SIGB); + value = value << HE_SIG_B_COMPRESSION_FLAG_2_SHIFT; + ppdu_info->he_flags2 |= value; + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_NUM_SIGB_SYMB); + value = value - 1; + value = value << HE_NUM_SIG_B_SYMBOLS_SHIFT; + ppdu_info->he_flags2 |= value; + + ppdu_info->is_stbc = info1 & + HAL_RX_HE_SIG_A_MU_DL_INFO1_STBC; + ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO; +} + +static void ath12k_dp_mon_parse_he_sig_su(u8 *tlv_data, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + struct hal_rx_he_sig_a_su_info *he_sig_a = + (struct hal_rx_he_sig_a_su_info *)tlv_data; + u32 info0, info1, value; + u32 dcm; + u8 he_dcm = 0, he_stbc = 0; + u16 he_gi = 0, he_ltf = 0; 
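+ /* Assemble the radiotap HE data1..data6 words from HE-SIG-A; the + * *_KNOWN bits declare which of the fields filled in below are valid. + */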
+ + ppdu_info->he_flags = 1; + + info0 = __le32_to_cpu(he_sig_a->info0); + info1 = __le32_to_cpu(he_sig_a->info1); + + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_FORMAT_IND); + if (value == 0) + ppdu_info->he_data1 = HE_TRIG_FORMAT_TYPE; + else + ppdu_info->he_data1 = HE_SU_FORMAT_TYPE; + + ppdu_info->he_data1 |= + HE_BSS_COLOR_KNOWN | + HE_BEAM_CHANGE_KNOWN | + HE_DL_UL_KNOWN | + HE_MCS_KNOWN | + HE_DCM_KNOWN | + HE_CODING_KNOWN | + HE_LDPC_EXTRA_SYMBOL_KNOWN | + HE_STBC_KNOWN | + HE_DATA_BW_RU_KNOWN | + HE_DOPPLER_KNOWN; + + ppdu_info->he_data2 |= + HE_GI_KNOWN | + HE_TXBF_KNOWN | + HE_PE_DISAMBIGUITY_KNOWN | + HE_TXOP_KNOWN | + HE_LTF_SYMBOLS_KNOWN | + HE_PRE_FEC_PADDING_KNOWN | + HE_MIDABLE_PERIODICITY_KNOWN; + + ppdu_info->he_data3 = u32_get_bits(info0, + HAL_RX_HE_SIG_A_SU_INFO_INFO0_BSS_COLOR); + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_BEAM_CHANGE); + value = value << HE_BEAM_CHANGE_SHIFT; + ppdu_info->he_data3 |= value; + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_DL_UL_FLAG); + value = value << HE_DL_UL_SHIFT; + ppdu_info->he_data3 |= value; + + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS); + ppdu_info->mcs = value; + value = value << HE_TRANSMIT_MCS_SHIFT; + ppdu_info->he_data3 |= value; + + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM); + he_dcm = value; + value = value << HE_DCM_SHIFT; + ppdu_info->he_data3 |= value; + value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING); + value = value << HE_CODING_SHIFT; + ppdu_info->he_data3 |= value; + value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_LDPC_EXTRA); + value = value << HE_LDPC_EXTRA_SYMBOL_SHIFT; + ppdu_info->he_data3 |= value; + value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC); + he_stbc = value; + value = value << HE_STBC_SHIFT; + ppdu_info->he_data3 |= value; + + /* data4 */ + ppdu_info->he_data4 = u32_get_bits(info0, + HAL_RX_HE_SIG_A_SU_INFO_INFO0_SPATIAL_REUSE); + + /* data5 */ + value = u32_get_bits(info0, + HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW); + ppdu_info->he_data5 = value; + ppdu_info->bw = value; + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE); + switch (value) { + case 0: + he_gi = HE_GI_0_8; + he_ltf = HE_LTF_1_X; + break; + case 1: + he_gi = HE_GI_0_8; + he_ltf = HE_LTF_2_X; + break; + case 2: + he_gi = HE_GI_1_6; + he_ltf = HE_LTF_2_X; + break; + case 3: + if (he_dcm && he_stbc) { + he_gi = HE_GI_0_8; + he_ltf = HE_LTF_4_X; + } else { + he_gi = HE_GI_3_2; + he_ltf = HE_LTF_4_X; + } + break; + } + ppdu_info->gi = he_gi; + value = he_gi << HE_GI_SHIFT; + ppdu_info->he_data5 |= value; + value = he_ltf << HE_LTF_SIZE_SHIFT; + ppdu_info->ltf_size = he_ltf; + ppdu_info->he_data5 |= value; + + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS); + value = (value << HE_LTF_SYM_SHIFT); + ppdu_info->he_data5 |= value; + + value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_FACTOR); + value = value << HE_PRE_FEC_PAD_SHIFT; + ppdu_info->he_data5 |= value; + + value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF); + value = value << HE_TXBF_SHIFT; + ppdu_info->he_data5 |= value; + value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_PE_DISAM); + value = value << HE_PE_DISAMBIGUITY_SHIFT; + ppdu_info->he_data5 |= value; + + /* data6 */ + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS); + value++; + ppdu_info->he_data6 = value; + value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_DOPPLER_IND); + 
value = value << HE_DOPPLER_SHIFT; + ppdu_info->he_data6 |= value; + value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXOP_DURATION); + value = value << HE_TXOP_SHIFT; + ppdu_info->he_data6 |= value; + + ppdu_info->mcs = + u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS); + ppdu_info->bw = + u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW); + ppdu_info->ldpc = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING); + ppdu_info->is_stbc = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC); + ppdu_info->beamformed = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF); + dcm = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM); + ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS); + ppdu_info->dcm = dcm; + ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; +} + +static enum hal_rx_mon_status +ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, + struct ath12k_mon_data *pmon, + u32 tlv_tag, u8 *tlv_data, u32 userid) +{ + struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info; + u32 info[7]; + + switch (tlv_tag) { + case HAL_RX_PPDU_START: { + struct hal_rx_ppdu_start *ppdu_start = + (struct hal_rx_ppdu_start *)tlv_data; + + info[0] = __le32_to_cpu(ppdu_start->info0); + + ppdu_info->ppdu_id = + u32_get_bits(info[0], HAL_RX_PPDU_START_INFO0_PPDU_ID); + ppdu_info->chan_num = __le32_to_cpu(ppdu_start->chan_num); + ppdu_info->ppdu_ts = __le32_to_cpu(ppdu_start->ppdu_start_ts); + + if (ppdu_info->ppdu_id != ppdu_info->last_ppdu_id) { + ppdu_info->last_ppdu_id = ppdu_info->ppdu_id; + ppdu_info->num_users = 0; + memset(&ppdu_info->mpdu_fcs_ok_bitmap, 0, + HAL_RX_NUM_WORDS_PER_PPDU_BITMAP * + sizeof(ppdu_info->mpdu_fcs_ok_bitmap[0])); + } + break; + } + case HAL_RX_PPDU_END_USER_STATS: { + struct hal_rx_ppdu_end_user_stats *eu_stats = + (struct hal_rx_ppdu_end_user_stats *)tlv_data; + + info[0] = __le32_to_cpu(eu_stats->info0); + info[1] = __le32_to_cpu(eu_stats->info1); + info[2] = __le32_to_cpu(eu_stats->info2); + info[4] = __le32_to_cpu(eu_stats->info4); + info[5] = __le32_to_cpu(eu_stats->info5); + info[6] = __le32_to_cpu(eu_stats->info6); + + ppdu_info->ast_index = + u32_get_bits(info[2], HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX); + ppdu_info->fc_valid = + u32_get_bits(info[1], HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID); + ppdu_info->tid = + ffs(u32_get_bits(info[6], + HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP) + - 1); + ppdu_info->tcp_msdu_count = + u32_get_bits(info[4], + HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT); + ppdu_info->udp_msdu_count = + u32_get_bits(info[4], + HAL_RX_PPDU_END_USER_STATS_INFO4_UDP_MSDU_CNT); + ppdu_info->other_msdu_count = + u32_get_bits(info[5], + HAL_RX_PPDU_END_USER_STATS_INFO5_OTHER_MSDU_CNT); + ppdu_info->tcp_ack_msdu_count = + u32_get_bits(info[5], + HAL_RX_PPDU_END_USER_STATS_INFO5_TCP_ACK_MSDU_CNT); + ppdu_info->preamble_type = + u32_get_bits(info[1], + HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE); + ppdu_info->num_mpdu_fcs_ok = + u32_get_bits(info[1], + HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK); + ppdu_info->num_mpdu_fcs_err = + u32_get_bits(info[0], + HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR); + switch (ppdu_info->preamble_type) { + case HAL_RX_PREAMBLE_11N: + ppdu_info->ht_flags = 1; + break; + case HAL_RX_PREAMBLE_11AC: + ppdu_info->vht_flags = 1; + break; + case HAL_RX_PREAMBLE_11AX: + ppdu_info->he_flags = 1; + break; + default: + break; + } + + if (userid < HAL_MAX_UL_MU_USERS) { + struct hal_rx_user_status *rxuser_stats = + 
&ppdu_info->userstats[userid]; + ppdu_info->num_users += 1; + + ath12k_dp_mon_rx_handle_ofdma_info(tlv_data, rxuser_stats); + ath12k_dp_mon_rx_populate_mu_user_info(tlv_data, ppdu_info, + rxuser_stats); + } + ppdu_info->mpdu_fcs_ok_bitmap[0] = __le32_to_cpu(eu_stats->rsvd1[0]); + ppdu_info->mpdu_fcs_ok_bitmap[1] = __le32_to_cpu(eu_stats->rsvd1[1]); + break; + } + case HAL_RX_PPDU_END_USER_STATS_EXT: { + struct hal_rx_ppdu_end_user_stats_ext *eu_stats = + (struct hal_rx_ppdu_end_user_stats_ext *)tlv_data; + ppdu_info->mpdu_fcs_ok_bitmap[2] = __le32_to_cpu(eu_stats->info1); + ppdu_info->mpdu_fcs_ok_bitmap[3] = __le32_to_cpu(eu_stats->info2); + ppdu_info->mpdu_fcs_ok_bitmap[4] = __le32_to_cpu(eu_stats->info3); + ppdu_info->mpdu_fcs_ok_bitmap[5] = __le32_to_cpu(eu_stats->info4); + ppdu_info->mpdu_fcs_ok_bitmap[6] = __le32_to_cpu(eu_stats->info5); + ppdu_info->mpdu_fcs_ok_bitmap[7] = __le32_to_cpu(eu_stats->info6); + break; + } + case HAL_PHYRX_HT_SIG: + ath12k_dp_mon_parse_ht_sig(tlv_data, ppdu_info); + break; + + case HAL_PHYRX_L_SIG_B: + ath12k_dp_mon_parse_l_sig_b(tlv_data, ppdu_info); + break; + + case HAL_PHYRX_L_SIG_A: + ath12k_dp_mon_parse_l_sig_a(tlv_data, ppdu_info); + break; + + case HAL_PHYRX_VHT_SIG_A: + ath12k_dp_mon_parse_vht_sig_a(tlv_data, ppdu_info); + break; + + case HAL_PHYRX_HE_SIG_A_SU: + ath12k_dp_mon_parse_he_sig_su(tlv_data, ppdu_info); + break; + + case HAL_PHYRX_HE_SIG_A_MU_DL: + ath12k_dp_mon_parse_he_sig_mu(tlv_data, ppdu_info); + break; + + case HAL_PHYRX_HE_SIG_B1_MU: + ath12k_dp_mon_parse_he_sig_b1_mu(tlv_data, ppdu_info); + break; + + case HAL_PHYRX_HE_SIG_B2_MU: + ath12k_dp_mon_parse_he_sig_b2_mu(tlv_data, ppdu_info); + break; + + case HAL_PHYRX_HE_SIG_B2_OFDMA: + ath12k_dp_mon_parse_he_sig_b2_ofdma(tlv_data, ppdu_info); + break; + + case HAL_PHYRX_RSSI_LEGACY: { + struct hal_rx_phyrx_rssi_legacy_info *rssi = + (struct hal_rx_phyrx_rssi_legacy_info *)tlv_data; + u32 reception_type = 0; + u32 rssi_legacy_info = __le32_to_cpu(rssi->rsvd[0]); + + info[0] = __le32_to_cpu(rssi->info0); + + /* TODO: Please note that the combined rssi will not be accurate + * in MU case. Rssi in MU needs to be retrieved from + * PHYRX_OTHER_RECEIVE_INFO TLV. 
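+ * Until that is done, the combined value below is applied to the + * whole PPDU irrespective of the number of users.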
+ */ + ppdu_info->rssi_comb = + u32_get_bits(info[0], + HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RSSI_COMB); + reception_type = + u32_get_bits(rssi_legacy_info, + HAL_RX_PHYRX_RSSI_LEGACY_INFO_RSVD1_RECEPTION); + + switch (reception_type) { + case HAL_RECEPTION_TYPE_ULOFMDA: + ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA; + break; + case HAL_RECEPTION_TYPE_ULMIMO: + ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO; + break; + default: + ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; + break; + } + break; + } + case HAL_RXPCU_PPDU_END_INFO: { + struct hal_rx_ppdu_end_duration *ppdu_rx_duration = + (struct hal_rx_ppdu_end_duration *)tlv_data; + + info[0] = __le32_to_cpu(ppdu_rx_duration->info0); + ppdu_info->rx_duration = + u32_get_bits(info[0], HAL_RX_PPDU_END_DURATION); + ppdu_info->tsft = __le32_to_cpu(ppdu_rx_duration->rsvd0[1]); + ppdu_info->tsft = (ppdu_info->tsft << 32) | + __le32_to_cpu(ppdu_rx_duration->rsvd0[0]); + break; + } + case HAL_RX_MPDU_START: { + struct hal_rx_mpdu_start *mpdu_start = + (struct hal_rx_mpdu_start *)tlv_data; + struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu; + u16 peer_id; + + info[1] = __le32_to_cpu(mpdu_start->info1); + peer_id = u32_get_bits(info[1], HAL_RX_MPDU_START_INFO1_PEERID); + if (peer_id) + ppdu_info->peer_id = peer_id; + + ppdu_info->mpdu_len += u32_get_bits(info[1], + HAL_RX_MPDU_START_INFO2_MPDU_LEN); + if (userid < HAL_MAX_UL_MU_USERS) { + info[0] = __le32_to_cpu(mpdu_start->info0); + ppdu_info->userid = userid; + ppdu_info->ampdu_id[userid] = + u32_get_bits(info[0], HAL_RX_MPDU_START_INFO1_PEERID); + } + + mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC); + if (!mon_mpdu) + return HAL_RX_MON_STATUS_PPDU_NOT_DONE; + + pmon->mon_mpdu = mon_mpdu; + break; + } + case HAL_RX_MSDU_START: + /* TODO: add msdu start parsing logic */ + break; + case HAL_MON_BUF_ADDR: { + struct dp_rxdma_ring *buf_ring = &ab->dp.rxdma_mon_buf_ring; + struct dp_mon_packet_info *packet_info = + (struct dp_mon_packet_info *)tlv_data; + int buf_id = u32_get_bits(packet_info->cookie, + DP_RXDMA_BUF_COOKIE_BUF_ID); + struct sk_buff *msdu; + struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu; + struct ath12k_skb_rxcb *rxcb; + + spin_lock_bh(&buf_ring->idr_lock); + msdu = idr_remove(&buf_ring->bufs_idr, buf_id); + spin_unlock_bh(&buf_ring->idr_lock); + + if (unlikely(!msdu)) { + ath12k_warn(ab, "monitor destination with invalid buf_id %d\n", + buf_id); + return HAL_RX_MON_STATUS_PPDU_NOT_DONE; + } + + rxcb = ATH12K_SKB_RXCB(msdu); + dma_unmap_single(ab->dev, rxcb->paddr, + msdu->len + skb_tailroom(msdu), + DMA_FROM_DEVICE); + + if (mon_mpdu->tail) + mon_mpdu->tail->next = msdu; + else + mon_mpdu->tail = msdu; + + ath12k_dp_mon_buf_replenish(ab, buf_ring, 1); + + break; + } + case HAL_RX_MSDU_END: { + struct rx_msdu_end_qcn9274 *msdu_end = + (struct rx_msdu_end_qcn9274 *)tlv_data; + bool is_first_msdu_in_mpdu; + u16 msdu_end_info; + + msdu_end_info = __le16_to_cpu(msdu_end->info5); + is_first_msdu_in_mpdu = u32_get_bits(msdu_end_info, + RX_MSDU_END_INFO5_FIRST_MSDU); + if (is_first_msdu_in_mpdu) { + pmon->mon_mpdu->head = pmon->mon_mpdu->tail; + pmon->mon_mpdu->tail = NULL; + } + break; + } + case HAL_RX_MPDU_END: + list_add_tail(&pmon->mon_mpdu->list, &pmon->dp_rx_mon_mpdu_list); + break; + case HAL_DUMMY: + return HAL_RX_MON_STATUS_BUF_DONE; + case HAL_RX_PPDU_END_STATUS_DONE: + case 0: + return HAL_RX_MON_STATUS_PPDU_DONE; + default: + break; + } + + return HAL_RX_MON_STATUS_PPDU_NOT_DONE; +} + +static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k *ar, struct sk_buff *msdu) 
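+/* Advance the skb past the HW rx descriptor and the L3 alignment padding + * so that it points at the actual frame payload. + */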
+{ + u32 rx_pkt_offset, l2_hdr_offset; + + rx_pkt_offset = ar->ab->hw_params->hal_desc_sz; + l2_hdr_offset = ath12k_dp_rx_h_l3pad(ar->ab, + (struct hal_rx_desc *)msdu->data); + skb_pull(msdu, rx_pkt_offset + l2_hdr_offset); +} + +static struct sk_buff * +ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, + u32 mac_id, struct sk_buff *head_msdu, + struct ieee80211_rx_status *rxs, bool *fcs_err) +{ + struct ath12k_base *ab = ar->ab; + struct sk_buff *msdu, *mpdu_buf, *prev_buf; + struct hal_rx_desc *rx_desc; + u8 *hdr_desc, *dest, decap_format; + struct ieee80211_hdr_3addr *wh; + u32 err_bitmap; + + mpdu_buf = NULL; + + if (!head_msdu) + goto err_merge_fail; + + rx_desc = (struct hal_rx_desc *)head_msdu->data; + err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc); + + if (err_bitmap & HAL_RX_MPDU_ERR_FCS) + *fcs_err = true; + + decap_format = ath12k_dp_rx_h_decap_type(ab, rx_desc); + + ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs); + + if (decap_format == DP_RX_DECAP_TYPE_RAW) { + ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu); + + prev_buf = head_msdu; + msdu = head_msdu->next; + + while (msdu) { + ath12k_dp_mon_rx_msdus_set_payload(ar, msdu); + + prev_buf = msdu; + msdu = msdu->next; + } + + prev_buf->next = NULL; + + skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN); + } else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) { + u8 qos_pkt = 0; + + rx_desc = (struct hal_rx_desc *)head_msdu->data; + hdr_desc = ab->hw_params->hal_ops->rx_desc_get_msdu_payload(rx_desc); + + /* Base size */ + wh = (struct ieee80211_hdr_3addr *)hdr_desc; + + if (ieee80211_is_data_qos(wh->frame_control)) + qos_pkt = 1; + + msdu = head_msdu; + + while (msdu) { + ath12k_dp_mon_rx_msdus_set_payload(ar, msdu); + if (qos_pkt) { + dest = skb_push(msdu, sizeof(__le16)); + if (!dest) + goto err_merge_fail; + memcpy(dest, hdr_desc, sizeof(struct ieee80211_qos_hdr)); + } + prev_buf = msdu; + msdu = msdu->next; + } + dest = skb_put(prev_buf, HAL_RX_FCS_LEN); + if (!dest) + goto err_merge_fail; + + ath12k_dbg(ab, ATH12K_DBG_DATA, + "mpdu_buf %pK mpdu_buf->len %u", + prev_buf, prev_buf->len); + } else { + ath12k_dbg(ab, ATH12K_DBG_DATA, + "decap format %d is not supported!\n", + decap_format); + goto err_merge_fail; + } + + return head_msdu; + +err_merge_fail: + if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) { + ath12k_dbg(ab, ATH12K_DBG_DATA, + "err_merge_fail mpdu_buf %pK", mpdu_buf); + /* Free the head buffer */ + dev_kfree_skb_any(mpdu_buf); + } + return NULL; +} + +static void +ath12k_dp_mon_rx_update_radiotap_he(struct hal_rx_mon_ppdu_info *rx_status, + u8 *rtap_buf) +{ + u32 rtap_len = 0; + + put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]); + rtap_len += 2; + + put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]); + rtap_len += 2; + + put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]); + rtap_len += 2; + + put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]); + rtap_len += 2; + + put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]); + rtap_len += 2; + + put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]); +} + +static void +ath12k_dp_mon_rx_update_radiotap_he_mu(struct hal_rx_mon_ppdu_info *rx_status, + u8 *rtap_buf) +{ + u32 rtap_len = 0; + + put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]); + rtap_len += 2; + + put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]); + rtap_len += 2; + + rtap_buf[rtap_len] = rx_status->he_RU[0]; + rtap_len += 1; + + rtap_buf[rtap_len] = rx_status->he_RU[1]; + rtap_len += 1; + + rtap_buf[rtap_len] = 
rx_status->he_RU[2]; + rtap_len += 1; + + rtap_buf[rtap_len] = rx_status->he_RU[3]; +} + +static void ath12k_dp_mon_update_radiotap(struct ath12k *ar, + struct hal_rx_mon_ppdu_info *ppduinfo, + struct sk_buff *mon_skb, + struct ieee80211_rx_status *rxs) +{ + struct ieee80211_supported_band *sband; + u8 *ptr = NULL; + u16 ampdu_id = ppduinfo->ampdu_id[ppduinfo->userid]; + + rxs->flag |= RX_FLAG_MACTIME_START; + rxs->signal = ppduinfo->rssi_comb + ATH12K_DEFAULT_NOISE_FLOOR; + rxs->nss = ppduinfo->nss + 1; + + if (ampdu_id) { + rxs->flag |= RX_FLAG_AMPDU_DETAILS; + rxs->ampdu_reference = ampdu_id; + } + + if (ppduinfo->he_mu_flags) { + rxs->flag |= RX_FLAG_RADIOTAP_HE_MU; + rxs->encoding = RX_ENC_HE; + ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu)); + ath12k_dp_mon_rx_update_radiotap_he_mu(ppduinfo, ptr); + } else if (ppduinfo->he_flags) { + rxs->flag |= RX_FLAG_RADIOTAP_HE; + rxs->encoding = RX_ENC_HE; + ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he)); + ath12k_dp_mon_rx_update_radiotap_he(ppduinfo, ptr); + rxs->rate_idx = ppduinfo->rate; + } else if (ppduinfo->vht_flags) { + rxs->encoding = RX_ENC_VHT; + rxs->rate_idx = ppduinfo->rate; + } else if (ppduinfo->ht_flags) { + rxs->encoding = RX_ENC_HT; + rxs->rate_idx = ppduinfo->rate; + } else { + rxs->encoding = RX_ENC_LEGACY; + sband = &ar->mac.sbands[rxs->band]; + rxs->rate_idx = ath12k_mac_hw_rate_to_idx(sband, ppduinfo->rate, + ppduinfo->cck_flag); + } + + rxs->mactime = ppduinfo->tsft; +} + +static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi, + struct sk_buff *msdu, + struct ieee80211_rx_status *status) +{ + static const struct ieee80211_radiotap_he known = { + .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN), + .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN), + }; + struct ieee80211_rx_status *rx_status; + struct ieee80211_radiotap_he *he = NULL; + struct ieee80211_sta *pubsta = NULL; + struct ath12k_peer *peer; + struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); + u8 decap = DP_RX_DECAP_TYPE_RAW; + bool is_mcbc = rxcb->is_mcbc; + bool is_eapol_tkip = rxcb->is_eapol; + + if ((status->encoding == RX_ENC_HE) && !(status->flag & RX_FLAG_RADIOTAP_HE) && + !(status->flag & RX_FLAG_SKIP_MONITOR)) { + he = skb_push(msdu, sizeof(known)); + memcpy(he, &known, sizeof(known)); + status->flag |= RX_FLAG_RADIOTAP_HE; + } + + if (!(status->flag & RX_FLAG_ONLY_MONITOR)) + decap = ath12k_dp_rx_h_decap_type(ar->ab, rxcb->rx_desc); + spin_lock_bh(&ar->ab->base_lock); + peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu); + if (peer && peer->sta) + pubsta = peer->sta; + spin_unlock_bh(&ar->ab->base_lock); + + ath12k_dbg(ar->ab, ATH12K_DBG_DATA, + "rx skb %pK len %u peer %pM %u %s %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", + msdu, + msdu->len, + peer ? peer->addr : NULL, + rxcb->tid, + (is_mcbc) ? "mcast" : "ucast", + (status->encoding == RX_ENC_LEGACY) ? "legacy" : "", + (status->encoding == RX_ENC_HT) ? "ht" : "", + (status->encoding == RX_ENC_VHT) ? "vht" : "", + (status->encoding == RX_ENC_HE) ? "he" : "", + (status->bw == RATE_INFO_BW_40) ? "40" : "", + (status->bw == RATE_INFO_BW_80) ? "80" : "", + (status->bw == RATE_INFO_BW_160) ? "160" : "", + status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "", + status->rate_idx, + status->nss, + status->freq, + status->band, status->flag, + !!(status->flag & RX_FLAG_FAILED_FCS_CRC), + !!(status->flag & RX_FLAG_MMIC_ERROR), + !!(status->flag & RX_FLAG_AMSDU_MORE)); + + ath12k_dbg_dump(ar->ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ", + msdu->data, msdu->len); + rx_status = IEEE80211_SKB_RXCB(msdu); + *rx_status = *status; + + /* TODO: trace rx packet */ + + /* PN for multicast packets is not validated in HW, + * so skip the 802.3 rx path. + * Also, fast_rx expects the STA to be authorized, hence + * EAPOL packets are sent in the slow path. + */ + if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol_tkip && + !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED)) + rx_status->flag |= RX_FLAG_8023; + + ieee80211_rx_napi(ar->hw, pubsta, msdu, napi); +} + +static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, u32 mac_id, + struct sk_buff *head_msdu, + struct hal_rx_mon_ppdu_info *ppduinfo, + struct napi_struct *napi) +{ + struct ath12k_pdev_dp *dp = &ar->dp; + struct sk_buff *mon_skb, *skb_next, *header; + struct ieee80211_rx_status *rxs = &dp->rx_status; + bool fcs_err = false; + + mon_skb = ath12k_dp_mon_rx_merg_msdus(ar, mac_id, head_msdu, + rxs, &fcs_err); + if (!mon_skb) + goto mon_deliver_fail; + + header = mon_skb; + rxs->flag = 0; + + if (fcs_err) + rxs->flag = RX_FLAG_FAILED_FCS_CRC; + + do { + skb_next = mon_skb->next; + if (!skb_next) + rxs->flag &= ~RX_FLAG_AMSDU_MORE; + else + rxs->flag |= RX_FLAG_AMSDU_MORE; + + if (mon_skb == header) { + header = NULL; + rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN; + } else { + rxs->flag |= RX_FLAG_ALLOW_SAME_PN; + } + rxs->flag |= RX_FLAG_ONLY_MONITOR; + ath12k_dp_mon_update_radiotap(ar, ppduinfo, mon_skb, rxs); + ath12k_dp_mon_rx_deliver_msdu(ar, napi, mon_skb, rxs); + mon_skb = skb_next; + } while (mon_skb); + rxs->flag = 0; + + return 0; + +mon_deliver_fail: + mon_skb = head_msdu; + while (mon_skb) { + skb_next = mon_skb->next; + dev_kfree_skb_any(mon_skb); + mon_skb = skb_next; + } + return -EINVAL; +} + +static enum hal_rx_mon_status +ath12k_dp_mon_parse_rx_dest(struct ath12k_base *ab, struct ath12k_mon_data *pmon, + struct sk_buff *skb) +{ + struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info; + struct hal_tlv_hdr *tlv; + enum hal_rx_mon_status hal_status; + u32 tlv_userid = 0; + u16 tlv_tag, tlv_len; + u8 *ptr = skb->data; + + memset(ppdu_info, 0, sizeof(struct hal_rx_mon_ppdu_info)); + + do { + tlv = (struct hal_tlv_hdr *)ptr; + tlv_tag = le32_get_bits(tlv->tl, HAL_TLV_HDR_TAG); + tlv_len = le32_get_bits(tlv->tl, HAL_TLV_HDR_LEN); + tlv_userid = le32_get_bits(tlv->tl, HAL_TLV_USR_ID); + ptr += sizeof(*tlv); + + /* The actual length of PPDU_END is the combined length of many PHY + * TLVs that follow. Skip the TLV header and + * rx_rxpcu_classification_overview that follows the header to get to + * the next TLV; i.e. the walk advances by + * sizeof(struct hal_rx_rxpcu_classification_overview) rather than + * the length encoded in the TLV header. 
+ */ + + if (tlv_tag == HAL_RX_PPDU_END) + tlv_len = sizeof(struct hal_rx_rxpcu_classification_overview); + + hal_status = ath12k_dp_mon_rx_parse_status_tlv(ab, pmon, + tlv_tag, ptr, tlv_userid); + ptr += tlv_len; + ptr = PTR_ALIGN(ptr, HAL_TLV_ALIGN); + + if ((ptr - skb->data) >= DP_RX_BUFFER_SIZE) + break; + + } while (hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE); + + return hal_status; +} + +enum hal_rx_mon_status +ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar, + struct ath12k_mon_data *pmon, + int mac_id, + struct sk_buff *skb, + struct napi_struct *napi) +{ + struct ath12k_base *ab = ar->ab; + struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info; + struct dp_mon_mpdu *tmp; + struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu; + struct sk_buff *head_msdu, *tail_msdu; + enum hal_rx_mon_status hal_status = HAL_RX_MON_STATUS_BUF_DONE; + + ath12k_dp_mon_parse_rx_dest(ab, pmon, skb); + + list_for_each_entry_safe(mon_mpdu, tmp, &pmon->dp_rx_mon_mpdu_list, list) { + list_del(&mon_mpdu->list); + head_msdu = mon_mpdu->head; + tail_msdu = mon_mpdu->tail; + + if (head_msdu && tail_msdu) { + ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu, + ppdu_info, napi); + } + + kfree(mon_mpdu); + } + return hal_status; +} + +int ath12k_dp_mon_buf_replenish(struct ath12k_base *ab, + struct dp_rxdma_ring *buf_ring, + int req_entries) +{ + struct hal_mon_buf_ring *mon_buf; + struct sk_buff *skb; + struct hal_srng *srng; + dma_addr_t paddr; + u32 cookie, buf_id; + + srng = &ab->hal.srng_list[buf_ring->refill_buf_ring.ring_id]; + spin_lock_bh(&srng->lock); + ath12k_hal_srng_access_begin(ab, srng); + + while (req_entries > 0) { + skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + DP_RX_BUFFER_ALIGN_SIZE); + if (unlikely(!skb)) + goto fail_alloc_skb; + + if (!IS_ALIGNED((unsigned long)skb->data, DP_RX_BUFFER_ALIGN_SIZE)) { + skb_pull(skb, + PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - + skb->data); + } + + paddr = dma_map_single(ab->dev, skb->data, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + + if (unlikely(dma_mapping_error(ab->dev, paddr))) + goto fail_free_skb; + + spin_lock_bh(&buf_ring->idr_lock); + buf_id = idr_alloc(&buf_ring->bufs_idr, skb, 0, + buf_ring->bufs_max * 3, GFP_ATOMIC); + spin_unlock_bh(&buf_ring->idr_lock); + + if (unlikely(buf_id < 0)) + goto fail_dma_unmap; + + mon_buf = ath12k_hal_srng_src_get_next_entry(ab, srng); + if (unlikely(!mon_buf)) + goto fail_idr_remove; + + ATH12K_SKB_RXCB(skb)->paddr = paddr; + + cookie = u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID); + + mon_buf->paddr_lo = cpu_to_le32(lower_32_bits(paddr)); + mon_buf->paddr_hi = cpu_to_le32(upper_32_bits(paddr)); + mon_buf->cookie = cpu_to_le64(cookie); + + req_entries--; + } + + ath12k_hal_srng_access_end(ab, srng); + spin_unlock_bh(&srng->lock); + return 0; + +fail_idr_remove: + spin_lock_bh(&buf_ring->idr_lock); + idr_remove(&buf_ring->bufs_idr, buf_id); + spin_unlock_bh(&buf_ring->idr_lock); +fail_dma_unmap: + dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); +fail_free_skb: + dev_kfree_skb_any(skb); +fail_alloc_skb: + ath12k_hal_srng_access_end(ab, srng); + spin_unlock_bh(&srng->lock); + return -ENOMEM; +} + +static struct dp_mon_tx_ppdu_info * +ath12k_dp_mon_tx_get_ppdu_info(struct ath12k_mon_data *pmon, + unsigned int ppdu_id, + enum dp_mon_tx_ppdu_info_type type) +{ + struct dp_mon_tx_ppdu_info *tx_ppdu_info; + + if (type == DP_MON_TX_PROT_PPDU_INFO) { + tx_ppdu_info = pmon->tx_prot_ppdu_info; + + if (tx_ppdu_info && !tx_ppdu_info->is_used) + return tx_ppdu_info; + 
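/* the cached descriptor was already consumed; free it and + * allocate a fresh one below + */ + 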
kfree(tx_ppdu_info); + } else { + tx_ppdu_info = pmon->tx_data_ppdu_info; + + if (tx_ppdu_info && !tx_ppdu_info->is_used) + return tx_ppdu_info; + kfree(tx_ppdu_info); + } + + /* allocate new tx_ppdu_info */ + tx_ppdu_info = kzalloc(sizeof(*tx_ppdu_info), GFP_ATOMIC); + if (!tx_ppdu_info) + return NULL; + + tx_ppdu_info->is_used = 0; + tx_ppdu_info->ppdu_id = ppdu_id; + + if (type == DP_MON_TX_PROT_PPDU_INFO) + pmon->tx_prot_ppdu_info = tx_ppdu_info; + else + pmon->tx_data_ppdu_info = tx_ppdu_info; + + return tx_ppdu_info; +} + +static struct dp_mon_tx_ppdu_info * +ath12k_dp_mon_hal_tx_ppdu_info(struct ath12k_mon_data *pmon, + u16 tlv_tag) +{ + switch (tlv_tag) { + case HAL_TX_FES_SETUP: + case HAL_TX_FLUSH: + case HAL_PCU_PPDU_SETUP_INIT: + case HAL_TX_PEER_ENTRY: + case HAL_TX_QUEUE_EXTENSION: + case HAL_TX_MPDU_START: + case HAL_TX_MSDU_START: + case HAL_TX_DATA: + case HAL_MON_BUF_ADDR: + case HAL_TX_MPDU_END: + case HAL_TX_LAST_MPDU_FETCHED: + case HAL_TX_LAST_MPDU_END: + case HAL_COEX_TX_REQ: + case HAL_TX_RAW_OR_NATIVE_FRAME_SETUP: + case HAL_SCH_CRITICAL_TLV_REFERENCE: + case HAL_TX_FES_SETUP_COMPLETE: + case HAL_TQM_MPDU_GLOBAL_START: + case HAL_SCHEDULER_END: + case HAL_TX_FES_STATUS_USER_PPDU: + break; + case HAL_TX_FES_STATUS_PROT: { + if (!pmon->tx_prot_ppdu_info->is_used) + pmon->tx_prot_ppdu_info->is_used = true; + + return pmon->tx_prot_ppdu_info; + } + } + + if (!pmon->tx_data_ppdu_info->is_used) + pmon->tx_data_ppdu_info->is_used = true; + + return pmon->tx_data_ppdu_info; +} + +#define MAX_MONITOR_HEADER 512 +#define MAX_DUMMY_FRM_BODY 128 + +struct sk_buff *ath12k_dp_mon_tx_alloc_skb(void) +{ + struct sk_buff *skb; + + skb = dev_alloc_skb(MAX_MONITOR_HEADER + MAX_DUMMY_FRM_BODY); + if (!skb) + return NULL; + + skb_reserve(skb, MAX_MONITOR_HEADER); + + if (!IS_ALIGNED((unsigned long)skb->data, 4)) + skb_pull(skb, PTR_ALIGN(skb->data, 4) - skb->data); + + return skb; +} + +static int +ath12k_dp_mon_tx_gen_cts2self_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info) +{ + struct sk_buff *skb; + struct ieee80211_cts *cts; + + skb = ath12k_dp_mon_tx_alloc_skb(); + if (!skb) + return -ENOMEM; + + cts = (struct ieee80211_cts *)skb->data; + memset(cts, 0, MAX_DUMMY_FRM_BODY); + cts->frame_control = + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS); + cts->duration = cpu_to_le16(tx_ppdu_info->rx_status.rx_duration); + memcpy(cts->ra, tx_ppdu_info->rx_status.addr1, sizeof(cts->ra)); + + skb_put(skb, sizeof(*cts)); + tx_ppdu_info->tx_mon_mpdu->head = skb; + tx_ppdu_info->tx_mon_mpdu->tail = NULL; + list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list, + &tx_ppdu_info->dp_tx_mon_mpdu_list); + + return 0; +} + +static int +ath12k_dp_mon_tx_gen_rts_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info) +{ + struct sk_buff *skb; + struct ieee80211_rts *rts; + + skb = ath12k_dp_mon_tx_alloc_skb(); + if (!skb) + return -ENOMEM; + + rts = (struct ieee80211_rts *)skb->data; + memset(rts, 0, MAX_DUMMY_FRM_BODY); + rts->frame_control = + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS); + rts->duration = cpu_to_le16(tx_ppdu_info->rx_status.rx_duration); + memcpy(rts->ra, tx_ppdu_info->rx_status.addr1, sizeof(rts->ra)); + memcpy(rts->ta, tx_ppdu_info->rx_status.addr2, sizeof(rts->ta)); + + skb_put(skb, sizeof(*rts)); + tx_ppdu_info->tx_mon_mpdu->head = skb; + tx_ppdu_info->tx_mon_mpdu->tail = NULL; + list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list, + &tx_ppdu_info->dp_tx_mon_mpdu_list); + + return 0; +} + +static int +ath12k_dp_mon_tx_gen_3addr_qos_null_frame(struct dp_mon_tx_ppdu_info 
*tx_ppdu_info) +{ + struct sk_buff *skb; + struct ieee80211_qos_hdr *qhdr; + + skb = ath12k_dp_mon_tx_alloc_skb(); + if (!skb) + return -ENOMEM; + + qhdr = (struct ieee80211_qos_hdr *)skb->data; + memset(qhdr, 0, MAX_DUMMY_FRM_BODY); + qhdr->frame_control = + cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC); + qhdr->duration_id = cpu_to_le16(tx_ppdu_info->rx_status.rx_duration); + memcpy(qhdr->addr1, tx_ppdu_info->rx_status.addr1, ETH_ALEN); + memcpy(qhdr->addr2, tx_ppdu_info->rx_status.addr2, ETH_ALEN); + memcpy(qhdr->addr3, tx_ppdu_info->rx_status.addr3, ETH_ALEN); + + skb_put(skb, sizeof(*qhdr)); + tx_ppdu_info->tx_mon_mpdu->head = skb; + tx_ppdu_info->tx_mon_mpdu->tail = NULL; + list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list, + &tx_ppdu_info->dp_tx_mon_mpdu_list); + + return 0; +} + +static int +ath12k_dp_mon_tx_gen_4addr_qos_null_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info) +{ + struct sk_buff *skb; + struct dp_mon_qosframe_addr4 *qhdr; + + skb = ath12k_dp_mon_tx_alloc_skb(); + if (!skb) + return -ENOMEM; + + qhdr = (struct dp_mon_qosframe_addr4 *)skb->data; + memset(qhdr, 0, MAX_DUMMY_FRM_BODY); + qhdr->frame_control = + cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC); + qhdr->duration = cpu_to_le16(tx_ppdu_info->rx_status.rx_duration); + memcpy(qhdr->addr1, tx_ppdu_info->rx_status.addr1, ETH_ALEN); + memcpy(qhdr->addr2, tx_ppdu_info->rx_status.addr2, ETH_ALEN); + memcpy(qhdr->addr3, tx_ppdu_info->rx_status.addr3, ETH_ALEN); + memcpy(qhdr->addr4, tx_ppdu_info->rx_status.addr4, ETH_ALEN); + + skb_put(skb, sizeof(*qhdr)); + tx_ppdu_info->tx_mon_mpdu->head = skb; + tx_ppdu_info->tx_mon_mpdu->tail = NULL; + list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list, + &tx_ppdu_info->dp_tx_mon_mpdu_list); + + return 0; +} + +static int +ath12k_dp_mon_tx_gen_ack_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info) +{ + struct sk_buff *skb; + struct dp_mon_frame_min_one *fbmhdr; + + skb = ath12k_dp_mon_tx_alloc_skb(); + if (!skb) + return -ENOMEM; + + fbmhdr = (struct dp_mon_frame_min_one *)skb->data; + memset(fbmhdr, 0, MAX_DUMMY_FRM_BODY); + fbmhdr->frame_control = + cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_CFACK); + memcpy(fbmhdr->addr1, tx_ppdu_info->rx_status.addr1, ETH_ALEN); + + /* set duration zero for ack frame */ + fbmhdr->duration = 0; + + skb_put(skb, sizeof(*fbmhdr)); + tx_ppdu_info->tx_mon_mpdu->head = skb; + tx_ppdu_info->tx_mon_mpdu->tail = NULL; + list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list, + &tx_ppdu_info->dp_tx_mon_mpdu_list); + + return 0; +} + +static int +ath12k_dp_mon_tx_gen_prot_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info) +{ + int ret = 0; + + switch (tx_ppdu_info->rx_status.medium_prot_type) { + case DP_MON_TX_MEDIUM_RTS_LEGACY: + case DP_MON_TX_MEDIUM_RTS_11AC_STATIC_BW: + case DP_MON_TX_MEDIUM_RTS_11AC_DYNAMIC_BW: + ret = ath12k_dp_mon_tx_gen_rts_frame(tx_ppdu_info); + break; + case DP_MON_TX_MEDIUM_CTS2SELF: + ret = ath12k_dp_mon_tx_gen_cts2self_frame(tx_ppdu_info); + break; + case DP_MON_TX_MEDIUM_QOS_NULL_NO_ACK_3ADDR: + ret = ath12k_dp_mon_tx_gen_3addr_qos_null_frame(tx_ppdu_info); + break; + case DP_MON_TX_MEDIUM_QOS_NULL_NO_ACK_4ADDR: + ret = ath12k_dp_mon_tx_gen_4addr_qos_null_frame(tx_ppdu_info); + break; + } + + return ret; +} + +static enum dp_mon_tx_tlv_status +ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab, + struct ath12k_mon_data *pmon, + u16 tlv_tag, u8 *tlv_data, u32 userid) +{ + struct dp_mon_tx_ppdu_info *tx_ppdu_info; + enum dp_mon_tx_tlv_status status = DP_MON_TX_STATUS_PPDU_NOT_DONE; + u32 
info[7]; + + tx_ppdu_info = ath12k_dp_mon_hal_tx_ppdu_info(pmon, tlv_tag); + + switch (tlv_tag) { + case HAL_TX_FES_SETUP: { + struct hal_tx_fes_setup *tx_fes_setup = + (struct hal_tx_fes_setup *)tlv_data; + + info[0] = __le32_to_cpu(tx_fes_setup->info0); + tx_ppdu_info->ppdu_id = __le32_to_cpu(tx_fes_setup->schedule_id); + tx_ppdu_info->num_users = + u32_get_bits(info[0], HAL_TX_FES_SETUP_INFO0_NUM_OF_USERS); + status = DP_MON_TX_FES_SETUP; + break; + } + + case HAL_TX_FES_STATUS_END: { + struct hal_tx_fes_status_end *tx_fes_status_end = + (struct hal_tx_fes_status_end *)tlv_data; + u32 tst_15_0, tst_31_16; + + info[0] = __le32_to_cpu(tx_fes_status_end->info0); + tst_15_0 = + u32_get_bits(info[0], + HAL_TX_FES_STATUS_END_INFO0_START_TIMESTAMP_15_0); + tst_31_16 = + u32_get_bits(info[0], + HAL_TX_FES_STATUS_END_INFO0_START_TIMESTAMP_31_16); + + tx_ppdu_info->rx_status.ppdu_ts = (tst_15_0 | (tst_31_16 << 16)); + status = DP_MON_TX_FES_STATUS_END; + break; + } + + case HAL_RX_RESPONSE_REQUIRED_INFO: { + struct hal_rx_resp_req_info *rx_resp_req_info = + (struct hal_rx_resp_req_info *)tlv_data; + u32 addr_32; + u16 addr_16; + + info[0] = __le32_to_cpu(rx_resp_req_info->info0); + info[1] = __le32_to_cpu(rx_resp_req_info->info1); + info[2] = __le32_to_cpu(rx_resp_req_info->info2); + info[3] = __le32_to_cpu(rx_resp_req_info->info3); + info[4] = __le32_to_cpu(rx_resp_req_info->info4); + info[5] = __le32_to_cpu(rx_resp_req_info->info5); + + tx_ppdu_info->rx_status.ppdu_id = + u32_get_bits(info[0], HAL_RX_RESP_REQ_INFO0_PPDU_ID); + tx_ppdu_info->rx_status.reception_type = + u32_get_bits(info[0], HAL_RX_RESP_REQ_INFO0_RECEPTION_TYPE); + tx_ppdu_info->rx_status.rx_duration = + u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_DURATION); + tx_ppdu_info->rx_status.mcs = + u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_RATE_MCS); + tx_ppdu_info->rx_status.sgi = + u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_SGI); + tx_ppdu_info->rx_status.is_stbc = + u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_STBC); + tx_ppdu_info->rx_status.ldpc = + u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_LDPC); + tx_ppdu_info->rx_status.is_ampdu = + u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_IS_AMPDU); + tx_ppdu_info->rx_status.num_users = + u32_get_bits(info[2], HAL_RX_RESP_REQ_INFO2_NUM_USER); + + addr_32 = u32_get_bits(info[3], HAL_RX_RESP_REQ_INFO3_ADDR1_31_0); + addr_16 = u32_get_bits(info[4], HAL_RX_RESP_REQ_INFO4_ADDR1_47_32); + ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr1); + + addr_16 = u32_get_bits(info[4], HAL_RX_RESP_REQ_INFO4_ADDR1_15_0); + addr_32 = u32_get_bits(info[5], HAL_RX_RESP_REQ_INFO5_ADDR1_47_16); + ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr2); + + if (tx_ppdu_info->rx_status.reception_type == 0) + ath12k_dp_mon_tx_gen_cts2self_frame(tx_ppdu_info); + status = DP_MON_RX_RESPONSE_REQUIRED_INFO; + break; + } + + case HAL_PCU_PPDU_SETUP_INIT: { + struct hal_tx_pcu_ppdu_setup_init *ppdu_setup = + (struct hal_tx_pcu_ppdu_setup_init *)tlv_data; + u32 addr_32; + u16 addr_16; + + info[0] = __le32_to_cpu(ppdu_setup->info0); + info[1] = __le32_to_cpu(ppdu_setup->info1); + info[2] = __le32_to_cpu(ppdu_setup->info2); + info[3] = __le32_to_cpu(ppdu_setup->info3); + info[4] = __le32_to_cpu(ppdu_setup->info4); + info[5] = __le32_to_cpu(ppdu_setup->info5); + info[6] = __le32_to_cpu(ppdu_setup->info6); + + /* protection frame address 1 */ + addr_32 = u32_get_bits(info[1], + HAL_TX_PPDU_SETUP_INFO1_PROT_FRAME_ADDR1_31_0); + addr_16 = u32_get_bits(info[2], + 
+ /* protection frame address 1 */
+ addr_32 = u32_get_bits(info[1],
+ HAL_TX_PPDU_SETUP_INFO1_PROT_FRAME_ADDR1_31_0);
+ addr_16 = u32_get_bits(info[2],
+ HAL_TX_PPDU_SETUP_INFO2_PROT_FRAME_ADDR1_47_32);
+ ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr1);
+
+ /* protection frame address 2 */
+ addr_16 = u32_get_bits(info[2],
+ HAL_TX_PPDU_SETUP_INFO2_PROT_FRAME_ADDR2_15_0);
+ addr_32 = u32_get_bits(info[3],
+ HAL_TX_PPDU_SETUP_INFO3_PROT_FRAME_ADDR2_47_16);
+ ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr2);
+
+ /* protection frame address 3 */
+ addr_32 = u32_get_bits(info[4],
+ HAL_TX_PPDU_SETUP_INFO4_PROT_FRAME_ADDR3_31_0);
+ addr_16 = u32_get_bits(info[5],
+ HAL_TX_PPDU_SETUP_INFO5_PROT_FRAME_ADDR3_47_32);
+ ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr3);
+
+ /* protection frame address 4 */
+ addr_16 = u32_get_bits(info[5],
+ HAL_TX_PPDU_SETUP_INFO5_PROT_FRAME_ADDR4_15_0);
+ addr_32 = u32_get_bits(info[6],
+ HAL_TX_PPDU_SETUP_INFO6_PROT_FRAME_ADDR4_47_16);
+ ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr4);
+
+ status = u32_get_bits(info[0],
+ HAL_TX_PPDU_SETUP_INFO0_MEDIUM_PROT_TYPE);
+ break;
+ }
+
+ case HAL_TX_QUEUE_EXTENSION: {
+ struct hal_tx_queue_exten *tx_q_exten =
+ (struct hal_tx_queue_exten *)tlv_data;
+
+ info[0] = __le32_to_cpu(tx_q_exten->info0);
+
+ tx_ppdu_info->rx_status.frame_control =
+ u32_get_bits(info[0],
+ HAL_TX_Q_EXT_INFO0_FRAME_CTRL);
+ tx_ppdu_info->rx_status.fc_valid = true;
+ break;
+ }
+
+ case HAL_TX_FES_STATUS_START: {
+ struct hal_tx_fes_status_start *tx_fes_start =
+ (struct hal_tx_fes_status_start *)tlv_data;
+
+ info[0] = __le32_to_cpu(tx_fes_start->info0);
+
+ tx_ppdu_info->rx_status.medium_prot_type =
+ u32_get_bits(info[0],
+ HAL_TX_FES_STATUS_START_INFO0_MEDIUM_PROT_TYPE);
+ break;
+ }
+
+ case HAL_TX_FES_STATUS_PROT: {
+ struct hal_tx_fes_status_prot *tx_fes_status =
+ (struct hal_tx_fes_status_prot *)tlv_data;
+ u32 start_timestamp;
+ u32 end_timestamp;
+
+ info[0] = __le32_to_cpu(tx_fes_status->info0);
+ info[1] = __le32_to_cpu(tx_fes_status->info1);
+
+ start_timestamp =
+ u32_get_bits(info[0],
+ HAL_TX_FES_STAT_PROT_INFO0_STRT_FRM_TS_15_0);
+ start_timestamp |=
+ u32_get_bits(info[0],
+ HAL_TX_FES_STAT_PROT_INFO0_STRT_FRM_TS_31_16) << 16;
+ end_timestamp =
+ u32_get_bits(info[1],
+ HAL_TX_FES_STAT_PROT_INFO1_END_FRM_TS_15_0);
+ end_timestamp |=
+ u32_get_bits(info[1],
+ HAL_TX_FES_STAT_PROT_INFO1_END_FRM_TS_31_16) << 16;
+ tx_ppdu_info->rx_status.rx_duration = end_timestamp - start_timestamp;
+
+ ath12k_dp_mon_tx_gen_prot_frame(tx_ppdu_info);
+ break;
+ }
+
+ case HAL_TX_FES_STATUS_START_PPDU:
+ case HAL_TX_FES_STATUS_START_PROT: {
+ struct hal_tx_fes_status_start_prot *tx_fes_stat_start =
+ (struct hal_tx_fes_status_start_prot *)tlv_data;
+ u64 ppdu_ts;
+
+ info[0] = __le32_to_cpu(tx_fes_stat_start->info0);
+ info[1] = __le32_to_cpu(tx_fes_stat_start->info1);
+
+ tx_ppdu_info->rx_status.ppdu_ts =
+ u32_get_bits(info[0],
+ HAL_TX_FES_STAT_STRT_INFO0_PROT_TS_LOWER_32);
+ ppdu_ts = (u32_get_bits(info[1],
+ HAL_TX_FES_STAT_STRT_INFO1_PROT_TS_UPPER_32));
+ tx_ppdu_info->rx_status.ppdu_ts |= ppdu_ts << 32;
+ break;
+ }
+
+ case HAL_TX_FES_STATUS_USER_PPDU: {
+ struct hal_tx_fes_status_user_ppdu *tx_fes_usr_ppdu =
+ (struct hal_tx_fes_status_user_ppdu *)tlv_data;
+
+ info[0] = __le32_to_cpu(tx_fes_usr_ppdu->info0);
+
+ tx_ppdu_info->rx_status.rx_duration =
+ u32_get_bits(info[0],
+ HAL_TX_FES_STAT_USR_PPDU_INFO0_DURATION);
+ break;
+ }
+
+ case HAL_MACTX_HE_SIG_A_SU:
+ ath12k_dp_mon_parse_he_sig_su(tlv_data, &tx_ppdu_info->rx_status);
+ break;
+
+ case HAL_MACTX_HE_SIG_A_MU_DL:
+ ath12k_dp_mon_parse_he_sig_mu(tlv_data, &tx_ppdu_info->rx_status);
+ break;
+
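+ /* Like the two HE-SIG-A cases above, the signal-field TLVs below
+ * only refresh rate and preamble fields in rx_status; no frame is
+ * generated for them.
+ */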
+ case HAL_MACTX_HE_SIG_B1_MU:
+ ath12k_dp_mon_parse_he_sig_b1_mu(tlv_data, &tx_ppdu_info->rx_status);
+ break;
+
+ case HAL_MACTX_HE_SIG_B2_MU:
+ ath12k_dp_mon_parse_he_sig_b2_mu(tlv_data, &tx_ppdu_info->rx_status);
+ break;
+
+ case HAL_MACTX_HE_SIG_B2_OFDMA:
+ ath12k_dp_mon_parse_he_sig_b2_ofdma(tlv_data, &tx_ppdu_info->rx_status);
+ break;
+
+ case HAL_MACTX_VHT_SIG_A:
+ ath12k_dp_mon_parse_vht_sig_a(tlv_data, &tx_ppdu_info->rx_status);
+ break;
+
+ case HAL_MACTX_L_SIG_A:
+ ath12k_dp_mon_parse_l_sig_a(tlv_data, &tx_ppdu_info->rx_status);
+ break;
+
+ case HAL_MACTX_L_SIG_B:
+ ath12k_dp_mon_parse_l_sig_b(tlv_data, &tx_ppdu_info->rx_status);
+ break;
+
+ case HAL_RX_FRAME_BITMAP_ACK: {
+ struct hal_rx_frame_bitmap_ack *fbm_ack =
+ (struct hal_rx_frame_bitmap_ack *)tlv_data;
+ u32 addr_32;
+ u16 addr_16;
+
+ info[0] = __le32_to_cpu(fbm_ack->info0);
+ info[1] = __le32_to_cpu(fbm_ack->info1);
+
+ addr_32 = u32_get_bits(info[0],
+ HAL_RX_FBM_ACK_INFO0_ADDR1_31_0);
+ addr_16 = u32_get_bits(info[1],
+ HAL_RX_FBM_ACK_INFO1_ADDR1_47_32);
+ ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr1);
+
+ ath12k_dp_mon_tx_gen_ack_frame(tx_ppdu_info);
+ break;
+ }
+
+ case HAL_MACTX_PHY_DESC: {
+ struct hal_tx_phy_desc *tx_phy_desc =
+ (struct hal_tx_phy_desc *)tlv_data;
+
+ info[0] = __le32_to_cpu(tx_phy_desc->info0);
+ info[1] = __le32_to_cpu(tx_phy_desc->info1);
+ info[2] = __le32_to_cpu(tx_phy_desc->info2);
+ info[3] = __le32_to_cpu(tx_phy_desc->info3);
+
+ tx_ppdu_info->rx_status.beamformed =
+ u32_get_bits(info[0],
+ HAL_TX_PHY_DESC_INFO0_BF_TYPE);
+ tx_ppdu_info->rx_status.preamble_type =
+ u32_get_bits(info[0],
+ HAL_TX_PHY_DESC_INFO0_PREAMBLE_11B);
+ tx_ppdu_info->rx_status.mcs =
+ u32_get_bits(info[1],
+ HAL_TX_PHY_DESC_INFO1_MCS);
+ tx_ppdu_info->rx_status.ltf_size =
+ u32_get_bits(info[3],
+ HAL_TX_PHY_DESC_INFO3_LTF_SIZE);
+ tx_ppdu_info->rx_status.nss =
+ u32_get_bits(info[2],
+ HAL_TX_PHY_DESC_INFO2_NSS);
+ tx_ppdu_info->rx_status.chan_num =
+ u32_get_bits(info[3],
+ HAL_TX_PHY_DESC_INFO3_ACTIVE_CHANNEL);
+ tx_ppdu_info->rx_status.bw =
+ u32_get_bits(info[0],
+ HAL_TX_PHY_DESC_INFO0_BANDWIDTH);
+ break;
+ }
+
+ case HAL_TX_MPDU_START: {
+ struct dp_mon_mpdu *mon_mpdu;
+
+ mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC);
+ if (!mon_mpdu)
+ return DP_MON_TX_STATUS_PPDU_NOT_DONE;
+ /* track the new MPDU so HAL_MON_BUF_ADDR/HAL_TX_MPDU_END can use it */
+ tx_ppdu_info->tx_mon_mpdu = mon_mpdu;
+ status = DP_MON_TX_MPDU_START;
+ break;
+ }
+
+ case HAL_MON_BUF_ADDR: {
+ struct dp_rxdma_ring *buf_ring = &ab->dp.tx_mon_buf_ring;
+ struct dp_mon_packet_info *packet_info =
+ (struct dp_mon_packet_info *)tlv_data;
+ int buf_id = u32_get_bits(packet_info->cookie,
+ DP_RXDMA_BUF_COOKIE_BUF_ID);
+ struct sk_buff *msdu;
+ struct dp_mon_mpdu *mon_mpdu = tx_ppdu_info->tx_mon_mpdu;
+ struct ath12k_skb_rxcb *rxcb;
+
+ spin_lock_bh(&buf_ring->idr_lock);
+ msdu = idr_remove(&buf_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&buf_ring->idr_lock);
+
+ if (unlikely(!msdu)) {
+ ath12k_warn(ab, "monitor destination with invalid buf_id %d\n",
+ buf_id);
+ return DP_MON_TX_STATUS_PPDU_NOT_DONE;
+ }
+
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ if (!mon_mpdu->head)
+ mon_mpdu->head = msdu;
+ else if (mon_mpdu->tail)
+ mon_mpdu->tail->next = msdu;
+
+ mon_mpdu->tail = msdu;
+
+ ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
+ status = DP_MON_TX_BUFFER_ADDR;
+ break;
+ }
+
+ case HAL_TX_MPDU_END:
+ list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
+ &tx_ppdu_info->dp_tx_mon_mpdu_list);
+ break;
+ }
+
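+ /* The status returned here drives the TLV walk in
+ * ath12k_dp_mon_tx_parse_mon_status(), which keeps parsing until
+ * DP_MON_TX_FES_STATUS_END is seen or the buffer is exhausted.
+ */
+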
return status; +} + +enum dp_mon_tx_tlv_status +ath12k_dp_mon_tx_status_get_num_user(u16 tlv_tag, + struct hal_tlv_hdr *tx_tlv, + u8 *num_users) +{ + u32 tlv_status = DP_MON_TX_STATUS_PPDU_NOT_DONE; + u32 info0; + + switch (tlv_tag) { + case HAL_TX_FES_SETUP: { + struct hal_tx_fes_setup *tx_fes_setup = + (struct hal_tx_fes_setup *)tx_tlv; + + info0 = __le32_to_cpu(tx_fes_setup->info0); + + *num_users = u32_get_bits(info0, HAL_TX_FES_SETUP_INFO0_NUM_OF_USERS); + tlv_status = DP_MON_TX_FES_SETUP; + break; + } + + case HAL_RX_RESPONSE_REQUIRED_INFO: { + /* TODO: need to update *num_users */ + tlv_status = DP_MON_RX_RESPONSE_REQUIRED_INFO; + break; + } + } + + return tlv_status; +} + +static void +ath12k_dp_mon_tx_process_ppdu_info(struct ath12k *ar, int mac_id, + struct napi_struct *napi, + struct dp_mon_tx_ppdu_info *tx_ppdu_info) +{ + struct dp_mon_mpdu *tmp, *mon_mpdu; + struct sk_buff *head_msdu; + + list_for_each_entry_safe(mon_mpdu, tmp, + &tx_ppdu_info->dp_tx_mon_mpdu_list, list) { + list_del(&mon_mpdu->list); + head_msdu = mon_mpdu->head; + + if (head_msdu) + ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu, + &tx_ppdu_info->rx_status, napi); + + kfree(mon_mpdu); + } +} + +enum hal_rx_mon_status +ath12k_dp_mon_tx_parse_mon_status(struct ath12k *ar, + struct ath12k_mon_data *pmon, + int mac_id, + struct sk_buff *skb, + struct napi_struct *napi, + u32 ppdu_id) +{ + struct ath12k_base *ab = ar->ab; + struct dp_mon_tx_ppdu_info *tx_prot_ppdu_info, *tx_data_ppdu_info; + struct hal_tlv_hdr *tlv; + u8 *ptr = skb->data; + u16 tlv_tag; + u16 tlv_len; + u32 tlv_userid = 0; + u8 num_user; + u32 tlv_status = DP_MON_TX_STATUS_PPDU_NOT_DONE; + + tx_prot_ppdu_info = ath12k_dp_mon_tx_get_ppdu_info(pmon, ppdu_id, + DP_MON_TX_PROT_PPDU_INFO); + if (!tx_prot_ppdu_info) + return -ENOMEM; + + tlv = (struct hal_tlv_hdr *)ptr; + tlv_tag = le32_get_bits(tlv->tl, HAL_TLV_HDR_TAG); + + tlv_status = ath12k_dp_mon_tx_status_get_num_user(tlv_tag, tlv, &num_user); + if (tlv_status == DP_MON_TX_STATUS_PPDU_NOT_DONE || !num_user) + return -EINVAL; + + tx_data_ppdu_info = ath12k_dp_mon_tx_get_ppdu_info(pmon, ppdu_id, + DP_MON_TX_DATA_PPDU_INFO); + if (!tx_data_ppdu_info) + return -ENOMEM; + + do { + tlv = (struct hal_tlv_hdr *)ptr; + tlv_tag = le32_get_bits(tlv->tl, HAL_TLV_HDR_TAG); + tlv_len = le32_get_bits(tlv->tl, HAL_TLV_HDR_LEN); + tlv_userid = le32_get_bits(tlv->tl, HAL_TLV_USR_ID); + + tlv_status = ath12k_dp_mon_tx_parse_status_tlv(ab, pmon, + tlv_tag, ptr, + tlv_userid); + ptr += tlv_len; + ptr = PTR_ALIGN(ptr, HAL_TLV_ALIGN); + if ((ptr - skb->data) >= DP_TX_MONITOR_BUF_SIZE) + break; + } while (tlv_status != DP_MON_TX_FES_STATUS_END); + + ath12k_dp_mon_tx_process_ppdu_info(ar, mac_id, napi, tx_data_ppdu_info); + ath12k_dp_mon_tx_process_ppdu_info(ar, mac_id, napi, tx_prot_ppdu_info); + + return tlv_status; +} + +int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id, int *budget, + enum dp_monitor_mode monitor_mode, + struct napi_struct *napi) +{ + struct hal_mon_dest_desc *mon_dst_desc; + struct ath12k_pdev_dp *pdev_dp = &ar->dp; + struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&pdev_dp->mon_data; + struct ath12k_base *ab = ar->ab; + struct ath12k_dp *dp = &ab->dp; + struct sk_buff *skb; + struct ath12k_skb_rxcb *rxcb; + struct dp_srng *mon_dst_ring; + struct hal_srng *srng; + struct dp_rxdma_ring *buf_ring; + u64 cookie; + u32 ppdu_id; + int num_buffs_reaped = 0, srng_id, buf_id; + u8 dest_idx = 0, i; + bool end_of_ppdu; + struct hal_rx_mon_ppdu_info *ppdu_info; + struct ath12k_peer *peer = 
NULL;
+
+ ppdu_info = &pmon->mon_ppdu_info;
+ memset(ppdu_info, 0, sizeof(*ppdu_info));
+ ppdu_info->peer_id = HAL_INVALID_PEERID;
+
+ srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id);
+
+ if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE) {
+ mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id];
+ buf_ring = &dp->rxdma_mon_buf_ring;
+ } else {
+ mon_dst_ring = &pdev_dp->tx_mon_dst_ring[srng_id];
+ buf_ring = &dp->tx_mon_buf_ring;
+ }
+
+ srng = &ab->hal.srng_list[mon_dst_ring->ring_id];
+
+ spin_lock_bh(&srng->lock);
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ while (likely(*budget)) {
+ *budget -= 1;
+ mon_dst_desc = ath12k_hal_srng_dst_peek(ab, srng);
+ if (unlikely(!mon_dst_desc))
+ break;
+
+ cookie = le32_to_cpu(mon_dst_desc->cookie);
+ buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+ spin_lock_bh(&buf_ring->idr_lock);
+ skb = idr_remove(&buf_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&buf_ring->idr_lock);
+
+ if (unlikely(!skb)) {
+ ath12k_warn(ab, "monitor destination with invalid buf_id %d\n",
+ buf_id);
+ goto move_next;
+ }
+
+ rxcb = ATH12K_SKB_RXCB(skb);
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ pmon->dest_skb_q[dest_idx] = skb;
+ dest_idx++;
+ ppdu_id = le32_to_cpu(mon_dst_desc->ppdu_id);
+ end_of_ppdu = le32_get_bits(mon_dst_desc->info0,
+ HAL_MON_DEST_INFO0_END_OF_PPDU);
+ if (!end_of_ppdu)
+ continue;
+
+ for (i = 0; i < dest_idx; i++) {
+ skb = pmon->dest_skb_q[i];
+
+ if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE)
+ ath12k_dp_mon_rx_parse_mon_status(ar, pmon, mac_id,
+ skb, napi);
+ else
+ ath12k_dp_mon_tx_parse_mon_status(ar, pmon, mac_id,
+ skb, napi, ppdu_id);
+
+ peer = ath12k_peer_find_by_id(ab, ppdu_info->peer_id);
+
+ if (!peer || !peer->sta) {
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "failed to find the peer with peer_id %d\n",
+ ppdu_info->peer_id);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ dev_kfree_skb_any(skb);
+ pmon->dest_skb_q[i] = NULL;
+ }
+
+ dest_idx = 0;
+move_next:
+ ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
+ ath12k_hal_srng_src_get_next_entry(ab, srng);
+ num_buffs_reaped++;
+ }
+
+ ath12k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ return num_buffs_reaped;
+}
+
+static void
+ath12k_dp_mon_rx_update_peer_rate_table_stats(struct ath12k_rx_peer_stats *rx_stats,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ struct hal_rx_user_status *user_stats,
+ u32 num_msdu)
+{
+ u32 rate_idx = 0;
+ u32 mcs_idx = (user_stats) ? user_stats->mcs : ppdu_info->mcs;
+ u32 nss_idx = (user_stats) ?
user_stats->nss - 1 : ppdu_info->nss - 1; + u32 bw_idx = ppdu_info->bw; + u32 gi_idx = ppdu_info->gi; + + if ((mcs_idx > HAL_RX_MAX_MCS_HE) || (nss_idx >= HAL_RX_MAX_NSS) || + (bw_idx >= HAL_RX_BW_MAX) || (gi_idx >= HAL_RX_GI_MAX)) { + return; + } + + if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11N || + ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AC) { + rate_idx = mcs_idx * 8 + 8 * 10 * nss_idx; + rate_idx += bw_idx * 2 + gi_idx; + } else if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AX) { + gi_idx = ath12k_he_gi_to_nl80211_he_gi(ppdu_info->gi); + rate_idx = mcs_idx * 12 + 12 * 12 * nss_idx; + rate_idx += bw_idx * 3 + gi_idx; + } else { + return; + } + + rx_stats->pkt_stats.rx_rate[rate_idx] += num_msdu; + if (user_stats) + rx_stats->byte_stats.rx_rate[rate_idx] += user_stats->mpdu_ok_byte_count; + else + rx_stats->byte_stats.rx_rate[rate_idx] += ppdu_info->mpdu_len; +} + +static void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k *ar, + struct ath12k_sta *arsta, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + struct ath12k_rx_peer_stats *rx_stats = arsta->rx_stats; + u32 num_msdu; + + if (!rx_stats) + return; + + arsta->rssi_comb = ppdu_info->rssi_comb; + + num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count + + ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count; + + rx_stats->num_msdu += num_msdu; + rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count + + ppdu_info->tcp_ack_msdu_count; + rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count; + rx_stats->other_msdu_count += ppdu_info->other_msdu_count; + + if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A || + ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) { + ppdu_info->nss = 1; + ppdu_info->mcs = HAL_RX_MAX_MCS; + ppdu_info->tid = IEEE80211_NUM_TIDS; + } + + if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX) + rx_stats->coding_count[ppdu_info->ldpc] += num_msdu; + + if (ppdu_info->tid <= IEEE80211_NUM_TIDS) + rx_stats->tid_count[ppdu_info->tid] += num_msdu; + + if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX) + rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu; + + if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX) + rx_stats->reception_type[ppdu_info->reception_type] += num_msdu; + + if (ppdu_info->is_stbc) + rx_stats->stbc_count += num_msdu; + + if (ppdu_info->beamformed) + rx_stats->beamformed_count += num_msdu; + + if (ppdu_info->num_mpdu_fcs_ok > 1) + rx_stats->ampdu_msdu_count += num_msdu; + else + rx_stats->non_ampdu_msdu_count += num_msdu; + + rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok; + rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err; + rx_stats->dcm_count += ppdu_info->dcm; + + rx_stats->rx_duration += ppdu_info->rx_duration; + arsta->rx_duration = rx_stats->rx_duration; + + if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS) { + rx_stats->pkt_stats.nss_count[ppdu_info->nss - 1] += num_msdu; + rx_stats->byte_stats.nss_count[ppdu_info->nss - 1] += ppdu_info->mpdu_len; + } + + if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11N && + ppdu_info->mcs <= HAL_RX_MAX_MCS_HT) { + rx_stats->pkt_stats.ht_mcs_count[ppdu_info->mcs] += num_msdu; + rx_stats->byte_stats.ht_mcs_count[ppdu_info->mcs] += ppdu_info->mpdu_len; + /* To fit into rate table for HT packets */ + ppdu_info->mcs = ppdu_info->mcs % 8; + } + + if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AC && + ppdu_info->mcs <= HAL_RX_MAX_MCS_VHT) { + rx_stats->pkt_stats.vht_mcs_count[ppdu_info->mcs] += num_msdu; + rx_stats->byte_stats.vht_mcs_count[ppdu_info->mcs] += ppdu_info->mpdu_len; + } + 
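+ /* For the SU histograms above and below, byte_stats accumulates the
+ * aggregate MPDU length reported for the PPDU, while pkt_stats counts
+ * MSDUs.
+ */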
+ if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AX && + ppdu_info->mcs <= HAL_RX_MAX_MCS_HE) { + rx_stats->pkt_stats.he_mcs_count[ppdu_info->mcs] += num_msdu; + rx_stats->byte_stats.he_mcs_count[ppdu_info->mcs] += ppdu_info->mpdu_len; + } + + if ((ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A || + ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) && + ppdu_info->rate < HAL_RX_LEGACY_RATE_INVALID) { + rx_stats->pkt_stats.legacy_count[ppdu_info->rate] += num_msdu; + rx_stats->byte_stats.legacy_count[ppdu_info->rate] += ppdu_info->mpdu_len; + } + + if (ppdu_info->gi < HAL_RX_GI_MAX) { + rx_stats->pkt_stats.gi_count[ppdu_info->gi] += num_msdu; + rx_stats->byte_stats.gi_count[ppdu_info->gi] += ppdu_info->mpdu_len; + } + + if (ppdu_info->bw < HAL_RX_BW_MAX) { + rx_stats->pkt_stats.bw_count[ppdu_info->bw] += num_msdu; + rx_stats->byte_stats.bw_count[ppdu_info->bw] += ppdu_info->mpdu_len; + } + + ath12k_dp_mon_rx_update_peer_rate_table_stats(rx_stats, ppdu_info, + NULL, num_msdu); +} + +void ath12k_dp_mon_rx_process_ulofdma(struct hal_rx_mon_ppdu_info *ppdu_info) +{ + struct hal_rx_user_status *rx_user_status; + u32 num_users, i, mu_ul_user_v0_word0, mu_ul_user_v0_word1, ru_size; + + if (!(ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_MIMO || + ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA || + ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO)) + return; + + num_users = ppdu_info->num_users; + if (num_users > HAL_MAX_UL_MU_USERS) + num_users = HAL_MAX_UL_MU_USERS; + + for (i = 0; i < num_users; i++) { + rx_user_status = &ppdu_info->userstats[i]; + mu_ul_user_v0_word0 = + rx_user_status->ul_ofdma_user_v0_word0; + mu_ul_user_v0_word1 = + rx_user_status->ul_ofdma_user_v0_word1; + + if (u32_get_bits(mu_ul_user_v0_word0, + HAL_RX_UL_OFDMA_USER_INFO_V0_W0_VALID) && + !u32_get_bits(mu_ul_user_v0_word0, + HAL_RX_UL_OFDMA_USER_INFO_V0_W0_VER)) { + rx_user_status->mcs = + u32_get_bits(mu_ul_user_v0_word1, + HAL_RX_UL_OFDMA_USER_INFO_V0_W1_MCS); + rx_user_status->nss = + u32_get_bits(mu_ul_user_v0_word1, + HAL_RX_UL_OFDMA_USER_INFO_V0_W1_NSS) + 1; + + rx_user_status->ofdma_info_valid = 1; + rx_user_status->ul_ofdma_ru_start_index = + u32_get_bits(mu_ul_user_v0_word1, + HAL_RX_UL_OFDMA_USER_INFO_V0_W1_RU_START); + + ru_size = u32_get_bits(mu_ul_user_v0_word1, + HAL_RX_UL_OFDMA_USER_INFO_V0_W1_RU_SIZE); + rx_user_status->ul_ofdma_ru_width = ru_size; + rx_user_status->ul_ofdma_ru_size = ru_size; + } + rx_user_status->ldpc = u32_get_bits(mu_ul_user_v0_word1, + HAL_RX_UL_OFDMA_USER_INFO_V0_W1_LDPC); + } + ppdu_info->ldpc = 1; +} + +static void +ath12k_dp_mon_rx_update_user_stats(struct ath12k *ar, + struct hal_rx_mon_ppdu_info *ppdu_info, + u32 uid) +{ + struct ath12k_sta *arsta = NULL; + struct ath12k_rx_peer_stats *rx_stats = NULL; + struct hal_rx_user_status *user_stats = &ppdu_info->userstats[uid]; + struct ath12k_peer *peer; + u32 num_msdu; + + if (user_stats->ast_index == 0 || user_stats->ast_index == 0xFFFF) + return; + + peer = ath12k_peer_find_by_ast(ar->ab, user_stats->ast_index); + + if (!peer) { + ath12k_warn(ar->ab, "peer ast idx %d can't be found\n", + user_stats->ast_index); + return; + } + + arsta = (struct ath12k_sta *)peer->sta->drv_priv; + rx_stats = arsta->rx_stats; + + if (!rx_stats) + return; + + arsta->rssi_comb = ppdu_info->rssi_comb; + + num_msdu = user_stats->tcp_msdu_count + user_stats->tcp_ack_msdu_count + + user_stats->udp_msdu_count + user_stats->other_msdu_count; + + rx_stats->num_msdu += num_msdu; + rx_stats->tcp_msdu_count += 
user_stats->tcp_msdu_count + + user_stats->tcp_ack_msdu_count; + rx_stats->udp_msdu_count += user_stats->udp_msdu_count; + rx_stats->other_msdu_count += user_stats->other_msdu_count; + + if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX) + rx_stats->coding_count[ppdu_info->ldpc] += num_msdu; + + if (user_stats->tid <= IEEE80211_NUM_TIDS) + rx_stats->tid_count[user_stats->tid] += num_msdu; + + if (user_stats->preamble_type < HAL_RX_PREAMBLE_MAX) + rx_stats->pream_cnt[user_stats->preamble_type] += num_msdu; + + if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX) + rx_stats->reception_type[ppdu_info->reception_type] += num_msdu; + + if (ppdu_info->is_stbc) + rx_stats->stbc_count += num_msdu; + + if (ppdu_info->beamformed) + rx_stats->beamformed_count += num_msdu; + + if (user_stats->mpdu_cnt_fcs_ok > 1) + rx_stats->ampdu_msdu_count += num_msdu; + else + rx_stats->non_ampdu_msdu_count += num_msdu; + + rx_stats->num_mpdu_fcs_ok += user_stats->mpdu_cnt_fcs_ok; + rx_stats->num_mpdu_fcs_err += user_stats->mpdu_cnt_fcs_err; + rx_stats->dcm_count += ppdu_info->dcm; + if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA || + ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO) + rx_stats->ru_alloc_cnt[user_stats->ul_ofdma_ru_size] += num_msdu; + + rx_stats->rx_duration += ppdu_info->rx_duration; + arsta->rx_duration = rx_stats->rx_duration; + + if (user_stats->nss > 0 && user_stats->nss <= HAL_RX_MAX_NSS) { + rx_stats->pkt_stats.nss_count[user_stats->nss - 1] += num_msdu; + rx_stats->byte_stats.nss_count[user_stats->nss - 1] += + user_stats->mpdu_ok_byte_count; + } + + if (user_stats->preamble_type == HAL_RX_PREAMBLE_11AX && + user_stats->mcs <= HAL_RX_MAX_MCS_HE) { + rx_stats->pkt_stats.he_mcs_count[user_stats->mcs] += num_msdu; + rx_stats->byte_stats.he_mcs_count[user_stats->mcs] += + user_stats->mpdu_ok_byte_count; + } + + if (ppdu_info->gi < HAL_RX_GI_MAX) { + rx_stats->pkt_stats.gi_count[ppdu_info->gi] += num_msdu; + rx_stats->byte_stats.gi_count[ppdu_info->gi] += + user_stats->mpdu_ok_byte_count; + } + + if (ppdu_info->bw < HAL_RX_BW_MAX) { + rx_stats->pkt_stats.bw_count[ppdu_info->bw] += num_msdu; + rx_stats->byte_stats.bw_count[ppdu_info->bw] += + user_stats->mpdu_ok_byte_count; + } + + ath12k_dp_mon_rx_update_peer_rate_table_stats(rx_stats, ppdu_info, + user_stats, num_msdu); +} + +static void +ath12k_dp_mon_rx_update_peer_mu_stats(struct ath12k *ar, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + u32 num_users, i; + + num_users = ppdu_info->num_users; + if (num_users > HAL_MAX_UL_MU_USERS) + num_users = HAL_MAX_UL_MU_USERS; + + for (i = 0; i < num_users; i++) + ath12k_dp_mon_rx_update_user_stats(ar, ppdu_info, i); +} + +int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id, + struct napi_struct *napi, int *budget) +{ + struct ath12k_base *ab = ar->ab; + struct ath12k_pdev_dp *pdev_dp = &ar->dp; + struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&pdev_dp->mon_data; + struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info; + struct ath12k_dp *dp = &ab->dp; + struct hal_mon_dest_desc *mon_dst_desc; + struct sk_buff *skb; + struct ath12k_skb_rxcb *rxcb; + struct dp_srng *mon_dst_ring; + struct hal_srng *srng; + struct dp_rxdma_ring *buf_ring; + struct ath12k_sta *arsta = NULL; + struct ath12k_peer *peer; + u64 cookie; + int num_buffs_reaped = 0, srng_id, buf_id; + u8 dest_idx = 0, i; + bool end_of_ppdu; + u32 hal_status; + + srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id); + mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id]; + 
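+ /* Status TLVs are reaped from the per-pdev rx monitor destination
+ * ring; consumed buffers are replenished back into the common
+ * monitor buffer ring.
+ */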
+ buf_ring = &dp->rxdma_mon_buf_ring;
+
+ srng = &ab->hal.srng_list[mon_dst_ring->ring_id];
+ spin_lock_bh(&srng->lock);
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ while (likely(*budget)) {
+ *budget -= 1;
+ mon_dst_desc = ath12k_hal_srng_dst_peek(ab, srng);
+ if (unlikely(!mon_dst_desc))
+ break;
+ cookie = le32_to_cpu(mon_dst_desc->cookie);
+ buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+ spin_lock_bh(&buf_ring->idr_lock);
+ skb = idr_remove(&buf_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&buf_ring->idr_lock);
+
+ if (unlikely(!skb)) {
+ ath12k_warn(ab, "monitor destination with invalid buf_id %d\n",
+ buf_id);
+ goto move_next;
+ }
+
+ rxcb = ATH12K_SKB_RXCB(skb);
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ pmon->dest_skb_q[dest_idx] = skb;
+ dest_idx++;
+ end_of_ppdu = le32_get_bits(mon_dst_desc->info0,
+ HAL_MON_DEST_INFO0_END_OF_PPDU);
+ if (!end_of_ppdu)
+ continue;
+
+ for (i = 0; i < dest_idx; i++) {
+ skb = pmon->dest_skb_q[i];
+ hal_status = ath12k_dp_mon_parse_rx_dest(ab, pmon, skb);
+
+ if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
+ hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ rcu_read_lock();
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_peer_find_by_id(ab, ppdu_info->peer_id);
+ if (!peer || !peer->sta) {
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "failed to find the peer with peer_id %d\n",
+ ppdu_info->peer_id);
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_SU) {
+ arsta = (struct ath12k_sta *)peer->sta->drv_priv;
+ ath12k_dp_mon_rx_update_peer_su_stats(ar, arsta,
+ ppdu_info);
+ } else if ((ppdu_info->fc_valid) &&
+ (ppdu_info->ast_index != HAL_AST_IDX_INVALID)) {
+ ath12k_dp_mon_rx_process_ulofdma(ppdu_info);
+ ath12k_dp_mon_rx_update_peer_mu_stats(ar, ppdu_info);
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+ dev_kfree_skb_any(skb);
+ memset(ppdu_info, 0, sizeof(*ppdu_info));
+ ppdu_info->peer_id = HAL_INVALID_PEERID;
+ }
+
+ dest_idx = 0;
+move_next:
+ ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
+ ath12k_hal_srng_src_get_next_entry(ab, srng);
+ num_buffs_reaped++;
+ }
+
+ ath12k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+ return num_buffs_reaped;
+}
+
+int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget,
+ enum dp_monitor_mode monitor_mode)
+{
+ struct ath12k *ar = ath12k_ab_to_ar(ab, mac_id);
+ int num_buffs_reaped = 0;
+
+ if (!ar->monitor_started)
+ ath12k_dp_mon_rx_process_stats(ar, mac_id, napi, &budget);
+ else
+ num_buffs_reaped = ath12k_dp_mon_srng_process(ar, mac_id, &budget,
+ monitor_mode, napi);
+
+ return num_buffs_reaped;
+}
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.h b/drivers/net/wireless/ath/ath12k/dp_mon.h
new file mode 100644
index 000000000000..c18c385798a1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */ + +#ifndef ATH12K_DP_MON_H +#define ATH12K_DP_MON_H + +#include "core.h" + +enum dp_monitor_mode { + ATH12K_DP_TX_MONITOR_MODE, + ATH12K_DP_RX_MONITOR_MODE +}; + +enum dp_mon_tx_ppdu_info_type { + DP_MON_TX_PROT_PPDU_INFO, + DP_MON_TX_DATA_PPDU_INFO +}; + +enum dp_mon_tx_tlv_status { + DP_MON_TX_FES_SETUP, + DP_MON_TX_FES_STATUS_END, + DP_MON_RX_RESPONSE_REQUIRED_INFO, + DP_MON_RESPONSE_END_STATUS_INFO, + DP_MON_TX_MPDU_START, + DP_MON_TX_MSDU_START, + DP_MON_TX_BUFFER_ADDR, + DP_MON_TX_DATA, + DP_MON_TX_STATUS_PPDU_NOT_DONE, +}; + +enum dp_mon_tx_medium_protection_type { + DP_MON_TX_MEDIUM_NO_PROTECTION, + DP_MON_TX_MEDIUM_RTS_LEGACY, + DP_MON_TX_MEDIUM_RTS_11AC_STATIC_BW, + DP_MON_TX_MEDIUM_RTS_11AC_DYNAMIC_BW, + DP_MON_TX_MEDIUM_CTS2SELF, + DP_MON_TX_MEDIUM_QOS_NULL_NO_ACK_3ADDR, + DP_MON_TX_MEDIUM_QOS_NULL_NO_ACK_4ADDR +}; + +struct dp_mon_qosframe_addr4 { + __le16 frame_control; + __le16 duration; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; + u8 addr3[ETH_ALEN]; + __le16 seq_ctrl; + u8 addr4[ETH_ALEN]; + __le16 qos_ctrl; +} __packed; + +struct dp_mon_frame_min_one { + __le16 frame_control; + __le16 duration; + u8 addr1[ETH_ALEN]; +} __packed; + +struct dp_mon_packet_info { + u64 cookie; + u16 dma_length; + bool msdu_continuation; + bool truncated; +}; + +struct dp_mon_tx_ppdu_info { + u32 ppdu_id; + u8 num_users; + bool is_used; + struct hal_rx_mon_ppdu_info rx_status; + struct list_head dp_tx_mon_mpdu_list; + struct dp_mon_mpdu *tx_mon_mpdu; +}; + +enum hal_rx_mon_status +ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar, + struct ath12k_mon_data *pmon, + int mac_id, struct sk_buff *skb, + struct napi_struct *napi); +int ath12k_dp_mon_buf_replenish(struct ath12k_base *ab, + struct dp_rxdma_ring *buf_ring, + int req_entries); +int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id, + int *budget, enum dp_monitor_mode monitor_mode, + struct napi_struct *napi); +int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id, + struct napi_struct *napi, int budget, + enum dp_monitor_mode monitor_mode); +struct sk_buff *ath12k_dp_mon_tx_alloc_skb(void); +enum dp_mon_tx_tlv_status +ath12k_dp_mon_tx_status_get_num_user(u16 tlv_tag, + struct hal_tlv_hdr *tx_tlv, + u8 *num_users); +enum hal_rx_mon_status +ath12k_dp_mon_tx_parse_mon_status(struct ath12k *ar, + struct ath12k_mon_data *pmon, + int mac_id, + struct sk_buff *skb, + struct napi_struct *napi, + u32 ppdu_id); +void ath12k_dp_mon_rx_process_ulofdma(struct hal_rx_mon_ppdu_info *ppdu_info); +int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id, + struct napi_struct *napi, int *budget); +#endif diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c new file mode 100644 index 000000000000..83a43ad48c51 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/dp_rx.c @@ -0,0 +1,4234 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include <linux/ieee80211.h> +#include <linux/kernel.h> +#include <linux/skbuff.h> +#include <crypto/hash.h> +#include "core.h" +#include "debug.h" +#include "hal_desc.h" +#include "hw.h" +#include "dp_rx.h" +#include "hal_rx.h" +#include "dp_tx.h" +#include "peer.h" +#include "dp_mon.h" + +#define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ) + +static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab, + struct hal_rx_desc *desc) +{ + if (!ab->hw_params->hal_ops->rx_desc_encrypt_valid(desc)) + return HAL_ENCRYPT_TYPE_OPEN; + + return ab->hw_params->hal_ops->rx_desc_get_encrypt_type(desc); +} + +u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab, + struct hal_rx_desc *desc) +{ + return ab->hw_params->hal_ops->rx_desc_get_decap_type(desc); +} + +static u8 ath12k_dp_rx_h_mesh_ctl_present(struct ath12k_base *ab, + struct hal_rx_desc *desc) +{ + return ab->hw_params->hal_ops->rx_desc_get_mesh_ctl(desc); +} + +static bool ath12k_dp_rx_h_seq_ctrl_valid(struct ath12k_base *ab, + struct hal_rx_desc *desc) +{ + return ab->hw_params->hal_ops->rx_desc_get_mpdu_seq_ctl_vld(desc); +} + +static bool ath12k_dp_rx_h_fc_valid(struct ath12k_base *ab, + struct hal_rx_desc *desc) +{ + return ab->hw_params->hal_ops->rx_desc_get_mpdu_fc_valid(desc); +} + +static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab, + struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr; + + hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params->hal_desc_sz); + return ieee80211_has_morefrags(hdr->frame_control); +} + +static u16 ath12k_dp_rx_h_frag_no(struct ath12k_base *ab, + struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr; + + hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params->hal_desc_sz); + return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; +} + +static u16 ath12k_dp_rx_h_seq_no(struct ath12k_base *ab, + struct hal_rx_desc *desc) +{ + return ab->hw_params->hal_ops->rx_desc_get_mpdu_start_seq_no(desc); +} + +static bool ath12k_dp_rx_h_msdu_done(struct ath12k_base *ab, + struct hal_rx_desc *desc) +{ + return ab->hw_params->hal_ops->dp_rx_h_msdu_done(desc); +} + +static bool ath12k_dp_rx_h_l4_cksum_fail(struct ath12k_base *ab, + struct hal_rx_desc *desc) +{ + return ab->hw_params->hal_ops->dp_rx_h_l4_cksum_fail(desc); +} + +static bool ath12k_dp_rx_h_ip_cksum_fail(struct ath12k_base *ab, + struct hal_rx_desc *desc) +{ + return ab->hw_params->hal_ops->dp_rx_h_ip_cksum_fail(desc); +} + +static bool ath12k_dp_rx_h_is_decrypted(struct ath12k_base *ab, + struct hal_rx_desc *desc) +{ + return ab->hw_params->hal_ops->dp_rx_h_is_decrypted(desc); +} + +u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab, + struct hal_rx_desc *desc) +{ + return ab->hw_params->hal_ops->dp_rx_h_mpdu_err(desc); +} + +static u16 ath12k_dp_rx_h_msdu_len(struct ath12k_base *ab, + struct hal_rx_desc *desc) +{ + return ab->hw_params->hal_ops->rx_desc_get_msdu_len(desc); +} + +static u8 ath12k_dp_rx_h_sgi(struct ath12k_base *ab, + struct hal_rx_desc *desc) +{ + return ab->hw_params->hal_ops->rx_desc_get_msdu_sgi(desc); +} + +static u8 ath12k_dp_rx_h_rate_mcs(struct ath12k_base *ab, + struct hal_rx_desc *desc) +{ + return ab->hw_params->hal_ops->rx_desc_get_msdu_rate_mcs(desc); +} + +static u8 ath12k_dp_rx_h_rx_bw(struct ath12k_base *ab, + struct hal_rx_desc *desc) +{ + return ab->hw_params->hal_ops->rx_desc_get_msdu_rx_bw(desc); +} + +static u32 ath12k_dp_rx_h_freq(struct ath12k_base *ab, + struct hal_rx_desc *desc) +{ + return ab->hw_params->hal_ops->rx_desc_get_msdu_freq(desc); +} + +static u8 
ath12k_dp_rx_h_pkt_type(struct ath12k_base *ab, + struct hal_rx_desc *desc) +{ + return ab->hw_params->hal_ops->rx_desc_get_msdu_pkt_type(desc); +} + +static u8 ath12k_dp_rx_h_nss(struct ath12k_base *ab, + struct hal_rx_desc *desc) +{ + return hweight8(ab->hw_params->hal_ops->rx_desc_get_msdu_nss(desc)); +} + +static u8 ath12k_dp_rx_h_tid(struct ath12k_base *ab, + struct hal_rx_desc *desc) +{ + return ab->hw_params->hal_ops->rx_desc_get_mpdu_tid(desc); +} + +static u16 ath12k_dp_rx_h_peer_id(struct ath12k_base *ab, + struct hal_rx_desc *desc) +{ + return ab->hw_params->hal_ops->rx_desc_get_mpdu_peer_id(desc); +} + +u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab, + struct hal_rx_desc *desc) +{ + return ab->hw_params->hal_ops->rx_desc_get_l3_pad_bytes(desc); +} + +static bool ath12k_dp_rx_h_first_msdu(struct ath12k_base *ab, + struct hal_rx_desc *desc) +{ + return ab->hw_params->hal_ops->rx_desc_get_first_msdu(desc); +} + +static bool ath12k_dp_rx_h_last_msdu(struct ath12k_base *ab, + struct hal_rx_desc *desc) +{ + return ab->hw_params->hal_ops->rx_desc_get_last_msdu(desc); +} + +static void ath12k_dp_rx_desc_end_tlv_copy(struct ath12k_base *ab, + struct hal_rx_desc *fdesc, + struct hal_rx_desc *ldesc) +{ + ab->hw_params->hal_ops->rx_desc_copy_end_tlv(fdesc, ldesc); +} + +static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab, + struct hal_rx_desc *desc, + u16 len) +{ + ab->hw_params->hal_ops->rx_desc_set_msdu_len(desc, len); +} + +static bool ath12k_dp_rx_h_is_mcbc(struct ath12k_base *ab, + struct hal_rx_desc *desc) +{ + return ab->hw_params->hal_ops->rx_desc_is_mcbc(desc); +} + +static bool ath12k_dp_rxdesc_mac_addr2_valid(struct ath12k_base *ab, + struct hal_rx_desc *desc) +{ + return ab->hw_params->hal_ops->rx_desc_mac_addr2_valid(desc); +} + +static u8 *ath12k_dp_rxdesc_get_mpdu_start_addr2(struct ath12k_base *ab, + struct hal_rx_desc *desc) +{ + return ab->hw_params->hal_ops->rx_desc_mpdu_start_addr2(desc); +} + +static void ath12k_dp_rx_desc_get_dot11_hdr(struct ath12k_base *ab, + struct hal_rx_desc *desc, + struct ieee80211_hdr *hdr) +{ + ab->hw_params->hal_ops->rx_desc_get_dot11_hdr(desc, hdr); +} + +static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab, + struct hal_rx_desc *desc, + u8 *crypto_hdr, + enum hal_encrypt_type enctype) +{ + ab->hw_params->hal_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype); +} + +static u16 ath12k_dp_rxdesc_get_mpdu_frame_ctrl(struct ath12k_base *ab, + struct hal_rx_desc *desc) +{ + return ab->hw_params->hal_ops->rx_desc_get_mpdu_frame_ctl(desc); +} + +static int ath12k_dp_purge_mon_ring(struct ath12k_base *ab) +{ + int i, reaped = 0; + unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS); + + do { + for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) + reaped += ath12k_dp_mon_process_ring(ab, i, NULL, + DP_MON_SERVICE_BUDGET, + ATH12K_DP_RX_MONITOR_MODE); + + /* nothing more to reap */ + if (reaped < DP_MON_SERVICE_BUDGET) + return 0; + + } while (time_before(jiffies, timeout)); + + ath12k_warn(ab, "dp mon ring purge timeout"); + + return -ETIMEDOUT; +} + +/* Returns number of Rx buffers replenished */ +int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, int mac_id, + struct dp_rxdma_ring *rx_ring, + int req_entries, + enum hal_rx_buf_return_buf_manager mgr, + bool hw_cc) +{ + struct ath12k_buffer_addr *desc; + struct hal_srng *srng; + struct sk_buff *skb; + int num_free; + int num_remain; + int buf_id; + u32 cookie; + dma_addr_t paddr; + struct ath12k_dp *dp = &ab->dp; + struct 
ath12k_rx_desc_info *rx_desc; + + req_entries = min(req_entries, rx_ring->bufs_max); + + srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; + + spin_lock_bh(&srng->lock); + + ath12k_hal_srng_access_begin(ab, srng); + + num_free = ath12k_hal_srng_src_num_free(ab, srng, true); + if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4)) + req_entries = num_free; + + req_entries = min(num_free, req_entries); + num_remain = req_entries; + + while (num_remain > 0) { + skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + + DP_RX_BUFFER_ALIGN_SIZE); + if (!skb) + break; + + if (!IS_ALIGNED((unsigned long)skb->data, + DP_RX_BUFFER_ALIGN_SIZE)) { + skb_pull(skb, + PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - + skb->data); + } + + paddr = dma_map_single(ab->dev, skb->data, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + if (dma_mapping_error(ab->dev, paddr)) + goto fail_free_skb; + + if (hw_cc) { + spin_lock_bh(&dp->rx_desc_lock); + + /* Get desc from free list and store in used list + * for cleanup purposes + * + * TODO: pass the removed descs rather than + * add/read to optimize + */ + rx_desc = list_first_entry_or_null(&dp->rx_desc_free_list, + struct ath12k_rx_desc_info, + list); + if (!rx_desc) { + spin_unlock_bh(&dp->rx_desc_lock); + goto fail_dma_unmap; + } + + rx_desc->skb = skb; + cookie = rx_desc->cookie; + list_del(&rx_desc->list); + list_add_tail(&rx_desc->list, &dp->rx_desc_used_list); + + spin_unlock_bh(&dp->rx_desc_lock); + } else { + spin_lock_bh(&rx_ring->idr_lock); + buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0, + rx_ring->bufs_max * 3, GFP_ATOMIC); + spin_unlock_bh(&rx_ring->idr_lock); + if (buf_id < 0) + goto fail_dma_unmap; + cookie = u32_encode_bits(mac_id, + DP_RXDMA_BUF_COOKIE_PDEV_ID) | + u32_encode_bits(buf_id, + DP_RXDMA_BUF_COOKIE_BUF_ID); + } + + desc = ath12k_hal_srng_src_get_next_entry(ab, srng); + if (!desc) + goto fail_buf_unassign; + + ATH12K_SKB_RXCB(skb)->paddr = paddr; + + num_remain--; + + ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); + } + + ath12k_hal_srng_access_end(ab, srng); + + spin_unlock_bh(&srng->lock); + + return req_entries - num_remain; + +fail_buf_unassign: + if (hw_cc) { + spin_lock_bh(&dp->rx_desc_lock); + list_del(&rx_desc->list); + list_add_tail(&rx_desc->list, &dp->rx_desc_free_list); + rx_desc->skb = NULL; + spin_unlock_bh(&dp->rx_desc_lock); + } else { + spin_lock_bh(&rx_ring->idr_lock); + idr_remove(&rx_ring->bufs_idr, buf_id); + spin_unlock_bh(&rx_ring->idr_lock); + } +fail_dma_unmap: + dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); +fail_free_skb: + dev_kfree_skb_any(skb); + + ath12k_hal_srng_access_end(ab, srng); + + spin_unlock_bh(&srng->lock); + + return req_entries - num_remain; +} + +static int ath12k_dp_rxdma_buf_ring_free(struct ath12k_base *ab, + struct dp_rxdma_ring *rx_ring) +{ + struct sk_buff *skb; + int buf_id; + + spin_lock_bh(&rx_ring->idr_lock); + idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) { + idr_remove(&rx_ring->bufs_idr, buf_id); + /* TODO: Understand where internal driver does this dma_unmap + * of rxdma_buffer. 
+ */ + dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr, + skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + } + + idr_destroy(&rx_ring->bufs_idr); + spin_unlock_bh(&rx_ring->idr_lock); + + return 0; +} + +static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab) +{ + struct ath12k_dp *dp = &ab->dp; + struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; + + ath12k_dp_rxdma_buf_ring_free(ab, rx_ring); + + rx_ring = &dp->rxdma_mon_buf_ring; + ath12k_dp_rxdma_buf_ring_free(ab, rx_ring); + + rx_ring = &dp->tx_mon_buf_ring; + ath12k_dp_rxdma_buf_ring_free(ab, rx_ring); + + return 0; +} + +static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab, + struct dp_rxdma_ring *rx_ring, + u32 ringtype) +{ + int num_entries; + + num_entries = rx_ring->refill_buf_ring.size / + ath12k_hal_srng_get_entrysize(ab, ringtype); + + rx_ring->bufs_max = num_entries; + if ((ringtype == HAL_RXDMA_MONITOR_BUF) || (ringtype == HAL_TX_MONITOR_BUF)) + ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries); + else + ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_entries, + ab->hw_params->hal_params->rx_buf_rbm, + ringtype == HAL_RXDMA_BUF); + return 0; +} + +static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab) +{ + struct ath12k_dp *dp = &ab->dp; + struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; + int ret; + + ret = ath12k_dp_rxdma_ring_buf_setup(ab, rx_ring, + HAL_RXDMA_BUF); + if (ret) { + ath12k_warn(ab, + "failed to setup HAL_RXDMA_BUF\n"); + return ret; + } + + if (ab->hw_params->rxdma1_enable) { + rx_ring = &dp->rxdma_mon_buf_ring; + ret = ath12k_dp_rxdma_ring_buf_setup(ab, rx_ring, + HAL_RXDMA_MONITOR_BUF); + if (ret) { + ath12k_warn(ab, + "failed to setup HAL_RXDMA_MONITOR_BUF\n"); + return ret; + } + + rx_ring = &dp->tx_mon_buf_ring; + ret = ath12k_dp_rxdma_ring_buf_setup(ab, rx_ring, + HAL_TX_MONITOR_BUF); + if (ret) { + ath12k_warn(ab, + "failed to setup HAL_TX_MONITOR_BUF\n"); + return ret; + } + } + + return 0; +} + +static void ath12k_dp_rx_pdev_srng_free(struct ath12k *ar) +{ + struct ath12k_pdev_dp *dp = &ar->dp; + struct ath12k_base *ab = ar->ab; + int i; + + for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) { + ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_dst_ring[i]); + ath12k_dp_srng_cleanup(ab, &dp->tx_mon_dst_ring[i]); + } +} + +void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab) +{ + struct ath12k_dp *dp = &ab->dp; + int i; + + for (i = 0; i < DP_REO_DST_RING_MAX; i++) + ath12k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]); +} + +int ath12k_dp_rx_pdev_reo_setup(struct ath12k_base *ab) +{ + struct ath12k_dp *dp = &ab->dp; + int ret; + int i; + + for (i = 0; i < DP_REO_DST_RING_MAX; i++) { + ret = ath12k_dp_srng_setup(ab, &dp->reo_dst_ring[i], + HAL_REO_DST, i, 0, + DP_REO_DST_RING_SIZE); + if (ret) { + ath12k_warn(ab, "failed to setup reo_dst_ring\n"); + goto err_reo_cleanup; + } + } + + return 0; + +err_reo_cleanup: + ath12k_dp_rx_pdev_reo_cleanup(ab); + + return ret; +} + +static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar) +{ + struct ath12k_pdev_dp *dp = &ar->dp; + struct ath12k_base *ab = ar->ab; + int i; + int ret; + u32 mac_id = dp->mac_id; + + for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) { + ret = ath12k_dp_srng_setup(ar->ab, + &dp->rxdma_mon_dst_ring[i], + HAL_RXDMA_MONITOR_DST, + 0, mac_id + i, + DP_RXDMA_MONITOR_DST_RING_SIZE); + if (ret) { + ath12k_warn(ar->ab, + "failed to setup HAL_RXDMA_MONITOR_DST\n"); + return ret; + } + + ret = ath12k_dp_srng_setup(ar->ab, + &dp->tx_mon_dst_ring[i], + 
HAL_TX_MONITOR_DST,
+ 0, mac_id + i,
+ DP_TX_MONITOR_DEST_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to setup HAL_TX_MONITOR_DST\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
+ struct ath12k_dp_rx_reo_cache_flush_elem *cmd_cache, *tmp_cache;
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
+ list_del(&cmd->list);
+ dma_unmap_single(ab->dev, cmd->data.paddr,
+ cmd->data.size, DMA_BIDIRECTIONAL);
+ kfree(cmd->data.vaddr);
+ kfree(cmd);
+ }
+
+ list_for_each_entry_safe(cmd_cache, tmp_cache,
+ &dp->reo_cmd_cache_flush_list, list) {
+ list_del(&cmd_cache->list);
+ dp->reo_cmd_cache_flush_count--;
+ dma_unmap_single(ab->dev, cmd_cache->data.paddr,
+ cmd_cache->data.size, DMA_BIDIRECTIONAL);
+ kfree(cmd_cache->data.vaddr);
+ kfree(cmd_cache);
+ }
+ spin_unlock_bh(&dp->reo_cmd_lock);
+}
+
+static void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,
+ enum hal_reo_cmd_status status)
+{
+ struct ath12k_dp_rx_tid *rx_tid = ctx;
+
+ if (status != HAL_REO_CMD_SUCCESS)
+ ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
+ rx_tid->tid, status);
+
+ dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
+ DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+ rx_tid->vaddr = NULL;
+}
+
+static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, struct ath12k_dp_rx_tid *rx_tid,
+ enum hal_reo_cmd_type type,
+ struct ath12k_hal_reo_cmd *cmd,
+ void (*cb)(struct ath12k_dp *dp, void *ctx,
+ enum hal_reo_cmd_status status))
+{
+ struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_dp_rx_reo_cmd *dp_cmd;
+ struct hal_srng *cmd_ring;
+ int cmd_num;
+
+ cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
+ cmd_num = ath12k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);
+
+ /* cmd_num should start from 1; on failure the error code is returned */
+ if (cmd_num < 0)
+ return cmd_num;
+
+ /* reo cmd ring descriptors have cmd_num starting from 1 */
+ if (cmd_num == 0)
+ return -EINVAL;
+
+ if (!cb)
+ return 0;
+
+ /* Can this be optimized so that we keep the pending command list only
+ * for the tid delete command to free up the resource on the command
+ * status indication?
+ */
+ dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);
+
+ if (!dp_cmd)
+ return -ENOMEM;
+
+ memcpy(&dp_cmd->data, rx_tid, sizeof(*rx_tid));
+ dp_cmd->cmd_num = cmd_num;
+ dp_cmd->handler = cb;
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ return 0;
+}
+
+static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
+ struct ath12k_dp_rx_tid *rx_tid)
+{
+ struct ath12k_hal_reo_cmd cmd = {0};
+ unsigned long tot_desc_sz, desc_sz;
+ int ret;
+
+ tot_desc_sz = rx_tid->size;
+ desc_sz = ath12k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
+
+ while (tot_desc_sz > desc_sz) {
+ tot_desc_sz -= desc_sz;
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
+ HAL_REO_CMD_FLUSH_CACHE, &cmd,
+ NULL);
+ if (ret)
+ ath12k_warn(ab,
+ "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
+ rx_tid->tid, ret);
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
+ HAL_REO_CMD_FLUSH_CACHE,
+ &cmd, ath12k_dp_reo_cmd_free);
+ if (ret) {
+ ath12k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
+ rx_tid->tid, ret);
+ dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
+ DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+ rx_tid->vaddr = NULL;
+ }
+}
+
+static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
+ enum hal_reo_cmd_status status)
+{
+ struct ath12k_base *ab = dp->ab;
+ struct ath12k_dp_rx_tid *rx_tid = ctx;
+ struct ath12k_dp_rx_reo_cache_flush_elem *elem, *tmp;
+
+ if (status == HAL_REO_CMD_DRAIN) {
+ goto free_desc;
+ } else if (status != HAL_REO_CMD_SUCCESS) {
+ /* Shouldn't happen! Cleanup in case of other failure? */
+ ath12k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
+ rx_tid->tid, status);
+ return;
+ }
+
+ elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
+ if (!elem)
+ goto free_desc;
+
+ elem->ts = jiffies;
+ memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
+ dp->reo_cmd_cache_flush_count++;
+
+ /* Flush and invalidate aged REO desc from HW cache */
+ list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
+ list) {
+ if (dp->reo_cmd_cache_flush_count > ATH12K_DP_RX_REO_DESC_FREE_THRES ||
+ time_after(jiffies, elem->ts +
+ msecs_to_jiffies(ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS))) {
+ list_del(&elem->list);
+ dp->reo_cmd_cache_flush_count--;
+
+ /* Unlock the reo_cmd_lock before using ath12k_dp_reo_cmd_send()
+ * within ath12k_dp_reo_cache_flush. The reo_cmd_cache_flush_list
+ * is used in only two contexts, one is in this function called
+ * from napi and the other in ath12k_dp_free during core destroy.
+ * Before dp_free, the irqs would be disabled and would wait to
+ * synchronize. Hence there wouldn't be any race against add or
+ * delete to this list. Hence unlock-lock is safe here.
+ */ + spin_unlock_bh(&dp->reo_cmd_lock); + + ath12k_dp_reo_cache_flush(ab, &elem->data); + kfree(elem); + spin_lock_bh(&dp->reo_cmd_lock); + } + } + spin_unlock_bh(&dp->reo_cmd_lock); + + return; +free_desc: + dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, + DMA_BIDIRECTIONAL); + kfree(rx_tid->vaddr); + rx_tid->vaddr = NULL; +} + +static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid, + dma_addr_t paddr) +{ + struct ath12k_reo_queue_ref *qref; + struct ath12k_dp *dp = &ab->dp; + + if (!ab->hw_params->reoq_lut_support) + return; + + /* TODO: based on ML peer or not, select the LUT. below assumes non + * ML peer + */ + qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr + + (peer_id * (IEEE80211_NUM_TIDS + 1) + tid); + + qref->info0 = u32_encode_bits(lower_32_bits(paddr), + BUFFER_ADDR_INFO0_ADDR); + qref->info1 = u32_encode_bits(upper_32_bits(paddr), + BUFFER_ADDR_INFO1_ADDR) | + u32_encode_bits(tid, DP_REO_QREF_NUM); +} + +static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid) +{ + struct ath12k_reo_queue_ref *qref; + struct ath12k_dp *dp = &ab->dp; + + if (!ab->hw_params->reoq_lut_support) + return; + + /* TODO: based on ML peer or not, select the LUT. below assumes non + * ML peer + */ + qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr + + (peer_id * (IEEE80211_NUM_TIDS + 1) + tid); + + qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR); + qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) | + u32_encode_bits(tid, DP_REO_QREF_NUM); +} + +void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar, + struct ath12k_peer *peer, u8 tid) +{ + struct ath12k_hal_reo_cmd cmd = {0}; + struct ath12k_dp_rx_tid *rx_tid = &peer->rx_tid[tid]; + int ret; + + if (!rx_tid->active) + return; + + cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; + cmd.addr_lo = lower_32_bits(rx_tid->paddr); + cmd.addr_hi = upper_32_bits(rx_tid->paddr); + cmd.upd0 = HAL_REO_CMD_UPD0_VLD; + ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid, + HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, + ath12k_dp_rx_tid_del_func); + if (ret) { + ath12k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n", + tid, ret); + dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size, + DMA_BIDIRECTIONAL); + kfree(rx_tid->vaddr); + rx_tid->vaddr = NULL; + } + + ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid); + + rx_tid->active = false; +} + +/* TODO: it's strange (and ugly) that struct hal_reo_dest_ring is converted + * to struct hal_wbm_release_ring, I couldn't figure out the logic behind + * that. 
+ */ +static int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab, + struct hal_reo_dest_ring *ring, + enum hal_wbm_rel_bm_act action) +{ + struct hal_wbm_release_ring *link_desc = (struct hal_wbm_release_ring *)ring; + struct hal_wbm_release_ring *desc; + struct ath12k_dp *dp = &ab->dp; + struct hal_srng *srng; + int ret = 0; + + srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id]; + + spin_lock_bh(&srng->lock); + + ath12k_hal_srng_access_begin(ab, srng); + + desc = ath12k_hal_srng_src_get_next_entry(ab, srng); + if (!desc) { + ret = -ENOBUFS; + goto exit; + } + + ath12k_hal_rx_msdu_link_desc_set(ab, desc, link_desc, action); + +exit: + ath12k_hal_srng_access_end(ab, srng); + + spin_unlock_bh(&srng->lock); + + return ret; +} + +static void ath12k_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid, + bool rel_link_desc) +{ + struct ath12k_base *ab = rx_tid->ab; + + lockdep_assert_held(&ab->base_lock); + + if (rx_tid->dst_ring_desc) { + if (rel_link_desc) + ath12k_dp_rx_link_desc_return(ab, rx_tid->dst_ring_desc, + HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); + kfree(rx_tid->dst_ring_desc); + rx_tid->dst_ring_desc = NULL; + } + + rx_tid->cur_sn = 0; + rx_tid->last_frag_no = 0; + rx_tid->rx_frag_bitmap = 0; + __skb_queue_purge(&rx_tid->rx_frags); +} + +void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_peer *peer) +{ + struct ath12k_dp_rx_tid *rx_tid; + int i; + + lockdep_assert_held(&ar->ab->base_lock); + + for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { + rx_tid = &peer->rx_tid[i]; + + ath12k_dp_rx_peer_tid_delete(ar, peer, i); + ath12k_dp_rx_frags_cleanup(rx_tid, true); + + spin_unlock_bh(&ar->ab->base_lock); + del_timer_sync(&rx_tid->frag_timer); + spin_lock_bh(&ar->ab->base_lock); + } +} + +static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar, + struct ath12k_peer *peer, + struct ath12k_dp_rx_tid *rx_tid, + u32 ba_win_sz, u16 ssn, + bool update_ssn) +{ + struct ath12k_hal_reo_cmd cmd = {0}; + int ret; + + cmd.addr_lo = lower_32_bits(rx_tid->paddr); + cmd.addr_hi = upper_32_bits(rx_tid->paddr); + cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; + cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE; + cmd.ba_window_size = ba_win_sz; + + if (update_ssn) { + cmd.upd0 |= HAL_REO_CMD_UPD0_SSN; + cmd.upd2 = u32_encode_bits(ssn, HAL_REO_CMD_UPD2_SSN); + } + + ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid, + HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, + NULL); + if (ret) { + ath12k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n", + rx_tid->tid, ret); + return ret; + } + + rx_tid->ba_win_sz = ba_win_sz; + + return 0; +} + +int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id, + u8 tid, u32 ba_win_sz, u16 ssn, + enum hal_pn_type pn_type) +{ + struct ath12k_base *ab = ar->ab; + struct ath12k_dp *dp = &ab->dp; + struct hal_rx_reo_queue *addr_aligned; + struct ath12k_peer *peer; + struct ath12k_dp_rx_tid *rx_tid; + u32 hw_desc_sz; + void *vaddr; + dma_addr_t paddr; + int ret; + + spin_lock_bh(&ab->base_lock); + + peer = ath12k_peer_find(ab, vdev_id, peer_mac); + if (!peer) { + spin_unlock_bh(&ab->base_lock); + ath12k_warn(ab, "failed to find the peer to set up rx tid\n"); + return -ENOENT; + } + + if (ab->hw_params->reoq_lut_support && !dp->reoq_lut.vaddr) { + spin_unlock_bh(&ab->base_lock); + ath12k_warn(ab, "reo qref table is not setup\n"); + return -EINVAL; + } + + if (peer->peer_id > DP_MAX_PEER_ID || tid > IEEE80211_NUM_TIDS) { + ath12k_warn(ab, "peer id of peer %d or tid %d doesn't allow reoq setup\n", + peer->peer_id, tid); + spin_unlock_bh(&ab->base_lock); + 
return -EINVAL; + } + + rx_tid = &peer->rx_tid[tid]; + /* Update the tid queue if it is already setup */ + if (rx_tid->active) { + paddr = rx_tid->paddr; + ret = ath12k_peer_rx_tid_reo_update(ar, peer, rx_tid, + ba_win_sz, ssn, true); + spin_unlock_bh(&ab->base_lock); + if (ret) { + ath12k_warn(ab, "failed to update reo for rx tid %d\n", tid); + return ret; + } + + return ret; + } + + rx_tid->tid = tid; + + rx_tid->ba_win_sz = ba_win_sz; + + /* TODO: Optimize the memory allocation for qos tid based on + * the actual BA window size in REO tid update path. + */ + if (tid == HAL_DESC_REO_NON_QOS_TID) + hw_desc_sz = ath12k_hal_reo_qdesc_size(ba_win_sz, tid); + else + hw_desc_sz = ath12k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid); + + vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC); + if (!vaddr) { + spin_unlock_bh(&ab->base_lock); + return -ENOMEM; + } + + addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN); + + ath12k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz, + ssn, pn_type); + + paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz, + DMA_BIDIRECTIONAL); + + ret = dma_mapping_error(ab->dev, paddr); + if (ret) { + spin_unlock_bh(&ab->base_lock); + goto err_mem_free; + } + + rx_tid->vaddr = vaddr; + rx_tid->paddr = paddr; + rx_tid->size = hw_desc_sz; + rx_tid->active = true; + + if (ab->hw_params->reoq_lut_support) { + /* Update the REO queue LUT at the corresponding peer id + * and tid with qaddr. + */ + ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr); + spin_unlock_bh(&ab->base_lock); + } else { + spin_unlock_bh(&ab->base_lock); + ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, + paddr, tid, 1, ba_win_sz); + } + + return ret; + +err_mem_free: + kfree(vaddr); + + return ret; +} + +int ath12k_dp_rx_ampdu_start(struct ath12k *ar, + struct ieee80211_ampdu_params *params) +{ + struct ath12k_base *ab = ar->ab; + struct ath12k_sta *arsta = (void *)params->sta->drv_priv; + int vdev_id = arsta->arvif->vdev_id; + int ret; + + ret = ath12k_dp_rx_peer_tid_setup(ar, params->sta->addr, vdev_id, + params->tid, params->buf_size, + params->ssn, arsta->pn_type); + if (ret) + ath12k_warn(ab, "failed to setup rx tid %d\n", ret); + + return ret; +} + +int ath12k_dp_rx_ampdu_stop(struct ath12k *ar, + struct ieee80211_ampdu_params *params) +{ + struct ath12k_base *ab = ar->ab; + struct ath12k_peer *peer; + struct ath12k_sta *arsta = (void *)params->sta->drv_priv; + int vdev_id = arsta->arvif->vdev_id; + bool active; + int ret; + + spin_lock_bh(&ab->base_lock); + + peer = ath12k_peer_find(ab, vdev_id, params->sta->addr); + if (!peer) { + spin_unlock_bh(&ab->base_lock); + ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n"); + return -ENOENT; + } + + active = peer->rx_tid[params->tid].active; + + if (!active) { + spin_unlock_bh(&ab->base_lock); + return 0; + } + + ret = ath12k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false); + spin_unlock_bh(&ab->base_lock); + if (ret) { + ath12k_warn(ab, "failed to update reo for rx tid %d: %d\n", + params->tid, ret); + return ret; + } + + return ret; +} + +int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_vif *arvif, + const u8 *peer_addr, + enum set_key_cmd key_cmd, + struct ieee80211_key_conf *key) +{ + struct ath12k *ar = arvif->ar; + struct ath12k_base *ab = ar->ab; + struct ath12k_hal_reo_cmd cmd = {0}; + struct ath12k_peer *peer; + struct ath12k_dp_rx_tid *rx_tid; + u8 tid; + int ret = 0; + + /* NOTE: Enable PN/TSC replay check offload only for unicast frames. 
+ * We use mac80211 PN/TSC replay check functionality for bcast/mcast + * for now. + */ + if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + return 0; + + cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; + cmd.upd0 = HAL_REO_CMD_UPD0_PN | + HAL_REO_CMD_UPD0_PN_SIZE | + HAL_REO_CMD_UPD0_PN_VALID | + HAL_REO_CMD_UPD0_PN_CHECK | + HAL_REO_CMD_UPD0_SVLD; + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_TKIP: + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + if (key_cmd == SET_KEY) { + cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK; + cmd.pn_size = 48; + } + break; + default: + break; + } + + spin_lock_bh(&ab->base_lock); + + peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr); + if (!peer) { + spin_unlock_bh(&ab->base_lock); + ath12k_warn(ab, "failed to find the peer %pM to configure pn replay detection\n", + peer_addr); + return -ENOENT; + } + + for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) { + rx_tid = &peer->rx_tid[tid]; + if (!rx_tid->active) + continue; + cmd.addr_lo = lower_32_bits(rx_tid->paddr); + cmd.addr_hi = upper_32_bits(rx_tid->paddr); + ret = ath12k_dp_reo_cmd_send(ab, rx_tid, + HAL_REO_CMD_UPDATE_RX_QUEUE, + &cmd, NULL); + if (ret) { + ath12k_warn(ab, "failed to configure rx tid %d queue of peer %pM for pn replay detection %d\n", + tid, peer_addr, ret); + break; + } + } + + spin_unlock_bh(&ab->base_lock); + + return ret; +} + +static int ath12k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats, + u16 peer_id) +{ + int i; + + for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) { + if (ppdu_stats->user_stats[i].is_valid_peer_id) { + if (peer_id == ppdu_stats->user_stats[i].peer_id) + return i; + } else { + return i; + } + } + + return -EINVAL; +} + +static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab, + u16 tag, u16 len, const void *ptr, + void *data) +{ + const struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *ba_status; + const struct htt_ppdu_stats_usr_cmpltn_cmn *cmplt_cmn; + const struct htt_ppdu_stats_user_rate *user_rate; + struct htt_ppdu_stats_info *ppdu_info; + struct htt_ppdu_user_stats *user_stats; + int cur_user; + u16 peer_id; + + ppdu_info = data; + + switch (tag) { + case HTT_PPDU_STATS_TAG_COMMON: + if (len < sizeof(struct htt_ppdu_stats_common)) { + ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n", + len, tag); + return -EINVAL; + } + memcpy(&ppdu_info->ppdu_stats.common, ptr, + sizeof(struct htt_ppdu_stats_common)); + break; + case HTT_PPDU_STATS_TAG_USR_RATE: + if (len < sizeof(struct htt_ppdu_stats_user_rate)) { + ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n", + len, tag); + return -EINVAL; + } + user_rate = ptr; + peer_id = le16_to_cpu(user_rate->sw_peer_id); + cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats, + peer_id); + if (cur_user < 0) + return -EINVAL; + user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; + user_stats->peer_id = peer_id; + user_stats->is_valid_peer_id = true; + memcpy(&user_stats->rate, ptr, + sizeof(struct htt_ppdu_stats_user_rate)); + user_stats->tlv_flags |= BIT(tag); + break; + case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON: + if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) { + ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n", + len, tag); + return -EINVAL; + } + + cmplt_cmn = ptr; + peer_id = le16_to_cpu(cmplt_cmn->sw_peer_id); + cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats, + peer_id); + if (cur_user < 0) + return -EINVAL; + user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; + 
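/* Bind this user slot to the peer and stash the rate TLV;
+		 * tlv_flags records which TLVs were seen for this user.
+		 */
+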
user_stats->peer_id = peer_id; + user_stats->is_valid_peer_id = true; + memcpy(&user_stats->cmpltn_cmn, ptr, + sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)); + user_stats->tlv_flags |= BIT(tag); + break; + case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS: + if (len < + sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) { + ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n", + len, tag); + return -EINVAL; + } + + ba_status = ptr; + peer_id = le16_to_cpu(ba_status->sw_peer_id); + cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats, + peer_id); + if (cur_user < 0) + return -EINVAL; + user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; + user_stats->peer_id = peer_id; + user_stats->is_valid_peer_id = true; + memcpy(&user_stats->ack_ba, ptr, + sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)); + user_stats->tlv_flags |= BIT(tag); + break; + } + return 0; +} + +static int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len, + int (*iter)(struct ath12k_base *ar, u16 tag, u16 len, + const void *ptr, void *data), + void *data) +{ + const struct htt_tlv *tlv; + const void *begin = ptr; + u16 tlv_tag, tlv_len; + int ret = -EINVAL; + + while (len > 0) { + if (len < sizeof(*tlv)) { + ath12k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n", + ptr - begin, len, sizeof(*tlv)); + return -EINVAL; + } + tlv = (struct htt_tlv *)ptr; + tlv_tag = le32_get_bits(tlv->header, HTT_TLV_TAG); + tlv_len = le32_get_bits(tlv->header, HTT_TLV_LEN); + ptr += sizeof(*tlv); + len -= sizeof(*tlv); + + if (tlv_len > len) { + ath12k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n", + tlv_tag, ptr - begin, len, tlv_len); + return -EINVAL; + } + ret = iter(ab, tlv_tag, tlv_len, ptr, data); + if (ret == -ENOMEM) + return ret; + + ptr += tlv_len; + len -= tlv_len; + } + return 0; +} + +static void +ath12k_update_per_peer_tx_stats(struct ath12k *ar, + struct htt_ppdu_stats *ppdu_stats, u8 user) +{ + struct ath12k_base *ab = ar->ab; + struct ath12k_peer *peer; + struct ieee80211_sta *sta; + struct ath12k_sta *arsta; + struct htt_ppdu_stats_user_rate *user_rate; + struct ath12k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats; + struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user]; + struct htt_ppdu_stats_common *common = &ppdu_stats->common; + int ret; + u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0; + u32 v, succ_bytes = 0; + u16 tones, rate = 0, succ_pkts = 0; + u32 tx_duration = 0; + u8 tid = HTT_PPDU_STATS_NON_QOS_TID; + bool is_ampdu = false; + + if (!usr_stats) + return; + + if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE))) + return; + + if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON)) + is_ampdu = + HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags); + + if (usr_stats->tlv_flags & + BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) { + succ_bytes = le32_to_cpu(usr_stats->ack_ba.success_bytes); + succ_pkts = le32_get_bits(usr_stats->ack_ba.info, + HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M); + tid = le32_get_bits(usr_stats->ack_ba.info, + HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM); + } + + if (common->fes_duration_us) + tx_duration = le32_to_cpu(common->fes_duration_us); + + user_rate = &usr_stats->rate; + flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags); + bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2; + nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1; + mcs = HTT_USR_RATE_MCS(user_rate->rate_flags); + sgi = HTT_USR_RATE_GI(user_rate->rate_flags); + dcm = 
HTT_USR_RATE_DCM(user_rate->rate_flags);
+
+	/* Note: If the host configured fixed rates, or in some other special
+	 * cases, broadcast/management frames are sent at different rates.
+	 * Should firmware rate control be skipped for these?
+	 */
+
+	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH12K_HE_MCS_MAX) {
+		ath12k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
+		return;
+	}
+
+	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH12K_VHT_MCS_MAX) {
+		ath12k_warn(ab, "Invalid VHT mcs %d peer stats", mcs);
+		return;
+	}
+
+	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH12K_HT_MCS_MAX || nss < 1)) {
+		ath12k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
+			    mcs, nss);
+		return;
+	}
+
+	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
+		ret = ath12k_mac_hw_ratecode_to_legacy_rate(mcs,
+							    flags,
+							    &rate_idx,
+							    &rate);
+		if (ret < 0)
+			return;
+	}
+
+	rcu_read_lock();
+	spin_lock_bh(&ab->base_lock);
+	peer = ath12k_peer_find_by_id(ab, usr_stats->peer_id);
+
+	if (!peer || !peer->sta) {
+		spin_unlock_bh(&ab->base_lock);
+		rcu_read_unlock();
+		return;
+	}
+
+	sta = peer->sta;
+	arsta = (struct ath12k_sta *)sta->drv_priv;
+
+	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
+
+	switch (flags) {
+	case WMI_RATE_PREAMBLE_OFDM:
+		arsta->txrate.legacy = rate;
+		break;
+	case WMI_RATE_PREAMBLE_CCK:
+		arsta->txrate.legacy = rate;
+		break;
+	case WMI_RATE_PREAMBLE_HT:
+		arsta->txrate.mcs = mcs + 8 * (nss - 1);
+		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
+		if (sgi)
+			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+		break;
+	case WMI_RATE_PREAMBLE_VHT:
+		arsta->txrate.mcs = mcs;
+		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
+		if (sgi)
+			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+		break;
+	case WMI_RATE_PREAMBLE_HE:
+		arsta->txrate.mcs = mcs;
+		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
+		arsta->txrate.he_dcm = dcm;
+		arsta->txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
+		tones = le16_to_cpu(user_rate->ru_end) -
+			le16_to_cpu(user_rate->ru_start) + 1;
+		v = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(tones);
+		arsta->txrate.he_ru_alloc = v;
+		break;
+	}
+
+	arsta->txrate.nss = nss;
+	arsta->txrate.bw = ath12k_mac_bw_to_mac80211_bw(bw);
+	arsta->tx_duration += tx_duration;
+	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
+
+	/* PPDU stats reported for a mgmt packet don't have valid tx bytes,
+	 * so skip the peer stats update for mgmt packets.
+ */ + if (tid < HTT_PPDU_STATS_NON_QOS_TID) { + memset(peer_stats, 0, sizeof(*peer_stats)); + peer_stats->succ_pkts = succ_pkts; + peer_stats->succ_bytes = succ_bytes; + peer_stats->is_ampdu = is_ampdu; + peer_stats->duration = tx_duration; + peer_stats->ba_fails = + HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) + + HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags); + } + + spin_unlock_bh(&ab->base_lock); + rcu_read_unlock(); +} + +static void ath12k_htt_update_ppdu_stats(struct ath12k *ar, + struct htt_ppdu_stats *ppdu_stats) +{ + u8 user; + + for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++) + ath12k_update_per_peer_tx_stats(ar, ppdu_stats, user); +} + +static +struct htt_ppdu_stats_info *ath12k_dp_htt_get_ppdu_desc(struct ath12k *ar, + u32 ppdu_id) +{ + struct htt_ppdu_stats_info *ppdu_info; + + lockdep_assert_held(&ar->data_lock); + if (!list_empty(&ar->ppdu_stats_info)) { + list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) { + if (ppdu_info->ppdu_id == ppdu_id) + return ppdu_info; + } + + if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) { + ppdu_info = list_first_entry(&ar->ppdu_stats_info, + typeof(*ppdu_info), list); + list_del(&ppdu_info->list); + ar->ppdu_stat_list_depth--; + ath12k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats); + kfree(ppdu_info); + } + } + + ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC); + if (!ppdu_info) + return NULL; + + list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info); + ar->ppdu_stat_list_depth++; + + return ppdu_info; +} + +static void ath12k_copy_to_delay_stats(struct ath12k_peer *peer, + struct htt_ppdu_user_stats *usr_stats) +{ + peer->ppdu_stats_delayba.sw_peer_id = le16_to_cpu(usr_stats->rate.sw_peer_id); + peer->ppdu_stats_delayba.info0 = le32_to_cpu(usr_stats->rate.info0); + peer->ppdu_stats_delayba.ru_end = le16_to_cpu(usr_stats->rate.ru_end); + peer->ppdu_stats_delayba.ru_start = le16_to_cpu(usr_stats->rate.ru_start); + peer->ppdu_stats_delayba.info1 = le32_to_cpu(usr_stats->rate.info1); + peer->ppdu_stats_delayba.rate_flags = le32_to_cpu(usr_stats->rate.rate_flags); + peer->ppdu_stats_delayba.resp_rate_flags = + le32_to_cpu(usr_stats->rate.resp_rate_flags); + + peer->delayba_flag = true; +} + +static void ath12k_copy_to_bar(struct ath12k_peer *peer, + struct htt_ppdu_user_stats *usr_stats) +{ + usr_stats->rate.sw_peer_id = cpu_to_le16(peer->ppdu_stats_delayba.sw_peer_id); + usr_stats->rate.info0 = cpu_to_le32(peer->ppdu_stats_delayba.info0); + usr_stats->rate.ru_end = cpu_to_le16(peer->ppdu_stats_delayba.ru_end); + usr_stats->rate.ru_start = cpu_to_le16(peer->ppdu_stats_delayba.ru_start); + usr_stats->rate.info1 = cpu_to_le32(peer->ppdu_stats_delayba.info1); + usr_stats->rate.rate_flags = cpu_to_le32(peer->ppdu_stats_delayba.rate_flags); + usr_stats->rate.resp_rate_flags = + cpu_to_le32(peer->ppdu_stats_delayba.resp_rate_flags); + + peer->delayba_flag = false; +} + +static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab, + struct sk_buff *skb) +{ + struct ath12k_htt_ppdu_stats_msg *msg; + struct htt_ppdu_stats_info *ppdu_info; + struct ath12k_peer *peer = NULL; + struct htt_ppdu_user_stats *usr_stats = NULL; + u32 peer_id = 0; + struct ath12k *ar; + int ret, i; + u8 pdev_id; + u32 ppdu_id, len; + + msg = (struct ath12k_htt_ppdu_stats_msg *)skb->data; + len = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE); + pdev_id = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PDEV_ID); + ppdu_id = le32_to_cpu(msg->ppdu_id); + + rcu_read_lock(); + ar = 
ath12k_mac_get_ar_by_pdev_id(ab, pdev_id); + if (!ar) { + ret = -EINVAL; + goto exit; + } + + spin_lock_bh(&ar->data_lock); + ppdu_info = ath12k_dp_htt_get_ppdu_desc(ar, ppdu_id); + if (!ppdu_info) { + spin_unlock_bh(&ar->data_lock); + ret = -EINVAL; + goto exit; + } + + ppdu_info->ppdu_id = ppdu_id; + ret = ath12k_dp_htt_tlv_iter(ab, msg->data, len, + ath12k_htt_tlv_ppdu_stats_parse, + (void *)ppdu_info); + if (ret) { + spin_unlock_bh(&ar->data_lock); + ath12k_warn(ab, "Failed to parse tlv %d\n", ret); + goto exit; + } + + /* back up data rate tlv for all peers */ + if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_DATA && + (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON)) && + ppdu_info->delay_ba) { + for (i = 0; i < ppdu_info->ppdu_stats.common.num_users; i++) { + peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id; + spin_lock_bh(&ab->base_lock); + peer = ath12k_peer_find_by_id(ab, peer_id); + if (!peer) { + spin_unlock_bh(&ab->base_lock); + continue; + } + + usr_stats = &ppdu_info->ppdu_stats.user_stats[i]; + if (usr_stats->delay_ba) + ath12k_copy_to_delay_stats(peer, usr_stats); + spin_unlock_bh(&ab->base_lock); + } + } + + /* restore all peers' data rate tlv to mu-bar tlv */ + if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_BAR && + (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON))) { + for (i = 0; i < ppdu_info->bar_num_users; i++) { + peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id; + spin_lock_bh(&ab->base_lock); + peer = ath12k_peer_find_by_id(ab, peer_id); + if (!peer) { + spin_unlock_bh(&ab->base_lock); + continue; + } + + usr_stats = &ppdu_info->ppdu_stats.user_stats[i]; + if (peer->delayba_flag) + ath12k_copy_to_bar(peer, usr_stats); + spin_unlock_bh(&ab->base_lock); + } + } + + spin_unlock_bh(&ar->data_lock); + +exit: + rcu_read_unlock(); + + return ret; +} + +static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab, + struct sk_buff *skb) +{ + struct ath12k_htt_mlo_offset_msg *msg; + struct ath12k_pdev *pdev; + struct ath12k *ar; + u8 pdev_id; + + msg = (struct ath12k_htt_mlo_offset_msg *)skb->data; + pdev_id = u32_get_bits(__le32_to_cpu(msg->info), + HTT_T2H_MLO_OFFSET_INFO_PDEV_ID); + ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id); + + if (!ar) { + ath12k_warn(ab, "invalid pdev id %d on htt mlo offset\n", pdev_id); + return; + } + + spin_lock_bh(&ar->data_lock); + pdev = ar->pdev; + + pdev->timestamp.info = __le32_to_cpu(msg->info); + pdev->timestamp.sync_timestamp_lo_us = __le32_to_cpu(msg->sync_timestamp_lo_us); + pdev->timestamp.sync_timestamp_hi_us = __le32_to_cpu(msg->sync_timestamp_hi_us); + pdev->timestamp.mlo_offset_lo = __le32_to_cpu(msg->mlo_offset_lo); + pdev->timestamp.mlo_offset_hi = __le32_to_cpu(msg->mlo_offset_hi); + pdev->timestamp.mlo_offset_clks = __le32_to_cpu(msg->mlo_offset_clks); + pdev->timestamp.mlo_comp_clks = __le32_to_cpu(msg->mlo_comp_clks); + pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer); + + spin_unlock_bh(&ar->data_lock); +} + +void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab, + struct sk_buff *skb) +{ + struct ath12k_dp *dp = &ab->dp; + struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data; + enum htt_t2h_msg_type type; + u16 peer_id; + u8 vdev_id; + u8 mac_addr[ETH_ALEN]; + u16 peer_mac_h16; + u16 ast_hash = 0; + u16 hw_peer_id; + + type = le32_get_bits(resp->version_msg.version, HTT_T2H_MSG_TYPE); + + ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type); + + switch (type) { + case HTT_T2H_MSG_TYPE_VERSION_CONF: + 
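/* Record the HTT API version reported by firmware and wake
+		 * the waiter blocked on htt_tgt_version_received.
+		 */
+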
dp->htt_tgt_ver_major = le32_get_bits(resp->version_msg.version, + HTT_T2H_VERSION_CONF_MAJOR); + dp->htt_tgt_ver_minor = le32_get_bits(resp->version_msg.version, + HTT_T2H_VERSION_CONF_MINOR); + complete(&dp->htt_tgt_version_received); + break; + /* TODO: remove unused peer map versions after testing */ + case HTT_T2H_MSG_TYPE_PEER_MAP: + vdev_id = le32_get_bits(resp->peer_map_ev.info, + HTT_T2H_PEER_MAP_INFO_VDEV_ID); + peer_id = le32_get_bits(resp->peer_map_ev.info, + HTT_T2H_PEER_MAP_INFO_PEER_ID); + peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1, + HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16); + ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32), + peer_mac_h16, mac_addr); + ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0); + break; + case HTT_T2H_MSG_TYPE_PEER_MAP2: + vdev_id = le32_get_bits(resp->peer_map_ev.info, + HTT_T2H_PEER_MAP_INFO_VDEV_ID); + peer_id = le32_get_bits(resp->peer_map_ev.info, + HTT_T2H_PEER_MAP_INFO_PEER_ID); + peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1, + HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16); + ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32), + peer_mac_h16, mac_addr); + ast_hash = le32_get_bits(resp->peer_map_ev.info2, + HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL); + hw_peer_id = le32_get_bits(resp->peer_map_ev.info1, + HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID); + ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash, + hw_peer_id); + break; + case HTT_T2H_MSG_TYPE_PEER_MAP3: + vdev_id = le32_get_bits(resp->peer_map_ev.info, + HTT_T2H_PEER_MAP_INFO_VDEV_ID); + peer_id = le32_get_bits(resp->peer_map_ev.info, + HTT_T2H_PEER_MAP_INFO_PEER_ID); + peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1, + HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16); + ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32), + peer_mac_h16, mac_addr); + ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash, + peer_id); + break; + case HTT_T2H_MSG_TYPE_PEER_UNMAP: + case HTT_T2H_MSG_TYPE_PEER_UNMAP2: + peer_id = le32_get_bits(resp->peer_unmap_ev.info, + HTT_T2H_PEER_UNMAP_INFO_PEER_ID); + ath12k_peer_unmap_event(ab, peer_id); + break; + case HTT_T2H_MSG_TYPE_PPDU_STATS_IND: + ath12k_htt_pull_ppdu_stats(ab, skb); + break; + case HTT_T2H_MSG_TYPE_EXT_STATS_CONF: + break; + case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND: + ath12k_htt_mlo_offset_event_handler(ab, skb); + break; + default: + ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt event %d not handled\n", + type); + break; + } + + dev_kfree_skb_any(skb); +} + +static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar, + struct sk_buff_head *msdu_list, + struct sk_buff *first, struct sk_buff *last, + u8 l3pad_bytes, int msdu_len) +{ + struct ath12k_base *ab = ar->ab; + struct sk_buff *skb; + struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first); + int buf_first_hdr_len, buf_first_len; + struct hal_rx_desc *ldesc; + int space_extra, rem_len, buf_len; + u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz; + + /* As the msdu is spread across multiple rx buffers, + * find the offset to the start of msdu for computing + * the length of the msdu in the first buffer. 
+ */
+	buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
+	buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;
+
+	if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
+		skb_put(first, buf_first_hdr_len + msdu_len);
+		skb_pull(first, buf_first_hdr_len);
+		return 0;
+	}
+
+	ldesc = (struct hal_rx_desc *)last->data;
+	rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, ldesc);
+	rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, ldesc);
+
+	/* The MSDU spans multiple buffers because its length exceeds
+	 * DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data in the
+	 * first buffer is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
+	 */
+	skb_put(first, DP_RX_BUFFER_SIZE);
+	skb_pull(first, buf_first_hdr_len);
+
+	/* When an MSDU is spread over multiple buffers, the MSDU_END
+	 * TLVs are valid only in the last buffer. Copy those TLVs.
+	 */
+	ath12k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);
+
+	space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
+	if (space_extra > 0 &&
+	    (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
+		/* Free up all buffers of the MSDU */
+		while ((skb = __skb_dequeue(msdu_list)) != NULL) {
+			rxcb = ATH12K_SKB_RXCB(skb);
+			if (!rxcb->is_continuation) {
+				dev_kfree_skb_any(skb);
+				break;
+			}
+			dev_kfree_skb_any(skb);
+		}
+		return -ENOMEM;
+	}
+
+	rem_len = msdu_len - buf_first_len;
+	while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
+		rxcb = ATH12K_SKB_RXCB(skb);
+		if (rxcb->is_continuation)
+			buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
+		else
+			buf_len = rem_len;
+
+		if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
+			WARN_ON_ONCE(1);
+			dev_kfree_skb_any(skb);
+			return -EINVAL;
+		}
+
+		skb_put(skb, buf_len + hal_rx_desc_sz);
+		skb_pull(skb, hal_rx_desc_sz);
+		skb_copy_from_linear_data(skb, skb_put(first, buf_len),
+					  buf_len);
+		dev_kfree_skb_any(skb);
+
+		rem_len -= buf_len;
+		if (!rxcb->is_continuation)
+			break;
+	}
+
+	return 0;
+}
+
+static struct sk_buff *ath12k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
+						      struct sk_buff *first)
+{
+	struct sk_buff *skb;
+	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
+
+	if (!rxcb->is_continuation)
+		return first;
+
+	skb_queue_walk(msdu_list, skb) {
+		rxcb = ATH12K_SKB_RXCB(skb);
+		if (!rxcb->is_continuation)
+			return skb;
+	}
+
+	return NULL;
+}
+
+static void ath12k_dp_rx_h_csum_offload(struct ath12k *ar, struct sk_buff *msdu)
+{
+	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+	struct ath12k_base *ab = ar->ab;
+	bool ip_csum_fail, l4_csum_fail;
+
+	ip_csum_fail = ath12k_dp_rx_h_ip_cksum_fail(ab, rxcb->rx_desc);
+	l4_csum_fail = ath12k_dp_rx_h_l4_cksum_fail(ab, rxcb->rx_desc);
+
+	msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
+ CHECKSUM_NONE : CHECKSUM_UNNECESSARY; +} + +static int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar, + enum hal_encrypt_type enctype) +{ + switch (enctype) { + case HAL_ENCRYPT_TYPE_OPEN: + case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: + case HAL_ENCRYPT_TYPE_TKIP_MIC: + return 0; + case HAL_ENCRYPT_TYPE_CCMP_128: + return IEEE80211_CCMP_MIC_LEN; + case HAL_ENCRYPT_TYPE_CCMP_256: + return IEEE80211_CCMP_256_MIC_LEN; + case HAL_ENCRYPT_TYPE_GCMP_128: + case HAL_ENCRYPT_TYPE_AES_GCMP_256: + return IEEE80211_GCMP_MIC_LEN; + case HAL_ENCRYPT_TYPE_WEP_40: + case HAL_ENCRYPT_TYPE_WEP_104: + case HAL_ENCRYPT_TYPE_WEP_128: + case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: + case HAL_ENCRYPT_TYPE_WAPI: + break; + } + + ath12k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype); + return 0; +} + +static int ath12k_dp_rx_crypto_param_len(struct ath12k *ar, + enum hal_encrypt_type enctype) +{ + switch (enctype) { + case HAL_ENCRYPT_TYPE_OPEN: + return 0; + case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: + case HAL_ENCRYPT_TYPE_TKIP_MIC: + return IEEE80211_TKIP_IV_LEN; + case HAL_ENCRYPT_TYPE_CCMP_128: + return IEEE80211_CCMP_HDR_LEN; + case HAL_ENCRYPT_TYPE_CCMP_256: + return IEEE80211_CCMP_256_HDR_LEN; + case HAL_ENCRYPT_TYPE_GCMP_128: + case HAL_ENCRYPT_TYPE_AES_GCMP_256: + return IEEE80211_GCMP_HDR_LEN; + case HAL_ENCRYPT_TYPE_WEP_40: + case HAL_ENCRYPT_TYPE_WEP_104: + case HAL_ENCRYPT_TYPE_WEP_128: + case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: + case HAL_ENCRYPT_TYPE_WAPI: + break; + } + + ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype); + return 0; +} + +static int ath12k_dp_rx_crypto_icv_len(struct ath12k *ar, + enum hal_encrypt_type enctype) +{ + switch (enctype) { + case HAL_ENCRYPT_TYPE_OPEN: + case HAL_ENCRYPT_TYPE_CCMP_128: + case HAL_ENCRYPT_TYPE_CCMP_256: + case HAL_ENCRYPT_TYPE_GCMP_128: + case HAL_ENCRYPT_TYPE_AES_GCMP_256: + return 0; + case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: + case HAL_ENCRYPT_TYPE_TKIP_MIC: + return IEEE80211_TKIP_ICV_LEN; + case HAL_ENCRYPT_TYPE_WEP_40: + case HAL_ENCRYPT_TYPE_WEP_104: + case HAL_ENCRYPT_TYPE_WEP_128: + case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: + case HAL_ENCRYPT_TYPE_WAPI: + break; + } + + ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype); + return 0; +} + +static void ath12k_dp_rx_h_undecap_nwifi(struct ath12k *ar, + struct sk_buff *msdu, + enum hal_encrypt_type enctype, + struct ieee80211_rx_status *status) +{ + struct ath12k_base *ab = ar->ab; + struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); + u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN]; + struct ieee80211_hdr *hdr; + size_t hdr_len; + u8 *crypto_hdr; + u16 qos_ctl; + + /* pull decapped header */ + hdr = (struct ieee80211_hdr *)msdu->data; + hdr_len = ieee80211_hdrlen(hdr->frame_control); + skb_pull(msdu, hdr_len); + + /* Rebuild qos header */ + hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA); + + /* Reset the order bit as the HT_Control header is stripped */ + hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER)); + + qos_ctl = rxcb->tid; + + if (ath12k_dp_rx_h_mesh_ctl_present(ab, rxcb->rx_desc)) + qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT; + + /* TODO: Add other QoS ctl fields when required */ + + /* copy decap header before overwriting for reuse below */ + memcpy(decap_hdr, hdr, hdr_len); + + /* Rebuild crypto header for mac80211 use */ + if (!(status->flag & RX_FLAG_IV_STRIPPED)) { + crypto_hdr = skb_push(msdu, ath12k_dp_rx_crypto_param_len(ar, enctype)); + ath12k_dp_rx_desc_get_crypto_header(ar->ab, + rxcb->rx_desc, crypto_hdr, + enctype); + } + + 
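/* Reassemble the 802.11 header in front of the payload: push
+	 * the QoS control field first, then the saved header on top.
+	 */
+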
memcpy(skb_push(msdu, + IEEE80211_QOS_CTL_LEN), &qos_ctl, + IEEE80211_QOS_CTL_LEN); + memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len); +} + +static void ath12k_dp_rx_h_undecap_raw(struct ath12k *ar, struct sk_buff *msdu, + enum hal_encrypt_type enctype, + struct ieee80211_rx_status *status, + bool decrypted) +{ + struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); + struct ieee80211_hdr *hdr; + size_t hdr_len; + size_t crypto_len; + + if (!rxcb->is_first_msdu || + !(rxcb->is_first_msdu && rxcb->is_last_msdu)) { + WARN_ON_ONCE(1); + return; + } + + skb_trim(msdu, msdu->len - FCS_LEN); + + if (!decrypted) + return; + + hdr = (void *)msdu->data; + + /* Tail */ + if (status->flag & RX_FLAG_IV_STRIPPED) { + skb_trim(msdu, msdu->len - + ath12k_dp_rx_crypto_mic_len(ar, enctype)); + + skb_trim(msdu, msdu->len - + ath12k_dp_rx_crypto_icv_len(ar, enctype)); + } else { + /* MIC */ + if (status->flag & RX_FLAG_MIC_STRIPPED) + skb_trim(msdu, msdu->len - + ath12k_dp_rx_crypto_mic_len(ar, enctype)); + + /* ICV */ + if (status->flag & RX_FLAG_ICV_STRIPPED) + skb_trim(msdu, msdu->len - + ath12k_dp_rx_crypto_icv_len(ar, enctype)); + } + + /* MMIC */ + if ((status->flag & RX_FLAG_MMIC_STRIPPED) && + !ieee80211_has_morefrags(hdr->frame_control) && + enctype == HAL_ENCRYPT_TYPE_TKIP_MIC) + skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN); + + /* Head */ + if (status->flag & RX_FLAG_IV_STRIPPED) { + hdr_len = ieee80211_hdrlen(hdr->frame_control); + crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype); + + memmove(msdu->data + crypto_len, msdu->data, hdr_len); + skb_pull(msdu, crypto_len); + } +} + +static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k *ar, + struct sk_buff *msdu, + struct ath12k_skb_rxcb *rxcb, + struct ieee80211_rx_status *status, + enum hal_encrypt_type enctype) +{ + struct hal_rx_desc *rx_desc = rxcb->rx_desc; + struct ath12k_base *ab = ar->ab; + size_t hdr_len, crypto_len; + struct ieee80211_hdr *hdr; + u16 qos_ctl; + __le16 fc; + u8 *crypto_hdr; + + if (!(status->flag & RX_FLAG_IV_STRIPPED)) { + crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype); + crypto_hdr = skb_push(msdu, crypto_len); + ath12k_dp_rx_desc_get_crypto_header(ab, rx_desc, crypto_hdr, enctype); + } + + fc = cpu_to_le16(ath12k_dp_rxdesc_get_mpdu_frame_ctrl(ab, rx_desc)); + hdr_len = ieee80211_hdrlen(fc); + skb_push(msdu, hdr_len); + hdr = (struct ieee80211_hdr *)msdu->data; + hdr->frame_control = fc; + + /* Get wifi header from rx_desc */ + ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, hdr); + + if (rxcb->is_mcbc) + status->flag &= ~RX_FLAG_PN_VALIDATED; + + /* Add QOS header */ + if (ieee80211_is_data_qos(hdr->frame_control)) { + qos_ctl = rxcb->tid; + if (ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc)) + qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT; + + /* TODO: Add other QoS ctl fields when required */ + memcpy(msdu->data + (hdr_len - IEEE80211_QOS_CTL_LEN), + &qos_ctl, IEEE80211_QOS_CTL_LEN); + } +} + +static void ath12k_dp_rx_h_undecap_eth(struct ath12k *ar, + struct sk_buff *msdu, + enum hal_encrypt_type enctype, + struct ieee80211_rx_status *status) +{ + struct ieee80211_hdr *hdr; + struct ethhdr *eth; + u8 da[ETH_ALEN]; + u8 sa[ETH_ALEN]; + struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); + struct ath12k_dp_rx_rfc1042_hdr rfc = {0xaa, 0xaa, 0x03, {0x00, 0x00, 0x00}}; + + eth = (struct ethhdr *)msdu->data; + ether_addr_copy(da, eth->h_dest); + ether_addr_copy(sa, eth->h_source); + rfc.snap_type = eth->h_proto; + skb_pull(msdu, sizeof(*eth)); + memcpy(skb_push(msdu, sizeof(rfc)), &rfc, + 
sizeof(rfc)); + ath12k_get_dot11_hdr_from_rx_desc(ar, msdu, rxcb, status, enctype); + + /* original 802.11 header has a different DA and in + * case of 4addr it may also have different SA + */ + hdr = (struct ieee80211_hdr *)msdu->data; + ether_addr_copy(ieee80211_get_DA(hdr), da); + ether_addr_copy(ieee80211_get_SA(hdr), sa); +} + +static void ath12k_dp_rx_h_undecap(struct ath12k *ar, struct sk_buff *msdu, + struct hal_rx_desc *rx_desc, + enum hal_encrypt_type enctype, + struct ieee80211_rx_status *status, + bool decrypted) +{ + struct ath12k_base *ab = ar->ab; + u8 decap; + struct ethhdr *ehdr; + + decap = ath12k_dp_rx_h_decap_type(ab, rx_desc); + + switch (decap) { + case DP_RX_DECAP_TYPE_NATIVE_WIFI: + ath12k_dp_rx_h_undecap_nwifi(ar, msdu, enctype, status); + break; + case DP_RX_DECAP_TYPE_RAW: + ath12k_dp_rx_h_undecap_raw(ar, msdu, enctype, status, + decrypted); + break; + case DP_RX_DECAP_TYPE_ETHERNET2_DIX: + ehdr = (struct ethhdr *)msdu->data; + + /* mac80211 allows fast path only for authorized STA */ + if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) { + ATH12K_SKB_RXCB(msdu)->is_eapol = true; + ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status); + break; + } + + /* PN for mcast packets will be validated in mac80211; + * remove eth header and add 802.11 header. + */ + if (ATH12K_SKB_RXCB(msdu)->is_mcbc && decrypted) + ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status); + break; + case DP_RX_DECAP_TYPE_8023: + /* TODO: Handle undecap for these formats */ + break; + } +} + +struct ath12k_peer * +ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu) +{ + struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); + struct hal_rx_desc *rx_desc = rxcb->rx_desc; + struct ath12k_peer *peer = NULL; + + lockdep_assert_held(&ab->base_lock); + + if (rxcb->peer_id) + peer = ath12k_peer_find_by_id(ab, rxcb->peer_id); + + if (peer) + return peer; + + if (!rx_desc || !(ath12k_dp_rxdesc_mac_addr2_valid(ab, rx_desc))) + return NULL; + + peer = ath12k_peer_find_by_addr(ab, + ath12k_dp_rxdesc_get_mpdu_start_addr2(ab, + rx_desc)); + return peer; +} + +static void ath12k_dp_rx_h_mpdu(struct ath12k *ar, + struct sk_buff *msdu, + struct hal_rx_desc *rx_desc, + struct ieee80211_rx_status *rx_status) +{ + bool fill_crypto_hdr; + struct ath12k_base *ab = ar->ab; + struct ath12k_skb_rxcb *rxcb; + enum hal_encrypt_type enctype; + bool is_decrypted = false; + struct ieee80211_hdr *hdr; + struct ath12k_peer *peer; + u32 err_bitmap; + + /* PN for multicast packets will be checked in mac80211 */ + rxcb = ATH12K_SKB_RXCB(msdu); + fill_crypto_hdr = ath12k_dp_rx_h_is_mcbc(ar->ab, rx_desc); + rxcb->is_mcbc = fill_crypto_hdr; + + if (rxcb->is_mcbc) + rxcb->peer_id = ath12k_dp_rx_h_peer_id(ar->ab, rx_desc); + + spin_lock_bh(&ar->ab->base_lock); + peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu); + if (peer) { + if (rxcb->is_mcbc) + enctype = peer->sec_type_grp; + else + enctype = peer->sec_type; + } else { + enctype = HAL_ENCRYPT_TYPE_OPEN; + } + spin_unlock_bh(&ar->ab->base_lock); + + err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc); + if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap) + is_decrypted = ath12k_dp_rx_h_is_decrypted(ab, rx_desc); + + /* Clear per-MPDU flags while leaving per-PPDU flags intact */ + rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | + RX_FLAG_MMIC_ERROR | + RX_FLAG_DECRYPTED | + RX_FLAG_IV_STRIPPED | + RX_FLAG_MMIC_STRIPPED); + + if (err_bitmap & HAL_RX_MPDU_ERR_FCS) + rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; + if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) + rx_status->flag |= 
RX_FLAG_MMIC_ERROR; + + if (is_decrypted) { + rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED; + + if (fill_crypto_hdr) + rx_status->flag |= RX_FLAG_MIC_STRIPPED | + RX_FLAG_ICV_STRIPPED; + else + rx_status->flag |= RX_FLAG_IV_STRIPPED | + RX_FLAG_PN_VALIDATED; + } + + ath12k_dp_rx_h_csum_offload(ar, msdu); + ath12k_dp_rx_h_undecap(ar, msdu, rx_desc, + enctype, rx_status, is_decrypted); + + if (!is_decrypted || fill_crypto_hdr) + return; + + if (ath12k_dp_rx_h_decap_type(ar->ab, rx_desc) != + DP_RX_DECAP_TYPE_ETHERNET2_DIX) { + hdr = (void *)msdu->data; + hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); + } +} + +static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct hal_rx_desc *rx_desc, + struct ieee80211_rx_status *rx_status) +{ + struct ath12k_base *ab = ar->ab; + struct ieee80211_supported_band *sband; + enum rx_msdu_start_pkt_type pkt_type; + u8 bw; + u8 rate_mcs, nss; + u8 sgi; + bool is_cck; + + pkt_type = ath12k_dp_rx_h_pkt_type(ab, rx_desc); + bw = ath12k_dp_rx_h_rx_bw(ab, rx_desc); + rate_mcs = ath12k_dp_rx_h_rate_mcs(ab, rx_desc); + nss = ath12k_dp_rx_h_nss(ab, rx_desc); + sgi = ath12k_dp_rx_h_sgi(ab, rx_desc); + + switch (pkt_type) { + case RX_MSDU_START_PKT_TYPE_11A: + case RX_MSDU_START_PKT_TYPE_11B: + is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B); + sband = &ar->mac.sbands[rx_status->band]; + rx_status->rate_idx = ath12k_mac_hw_rate_to_idx(sband, rate_mcs, + is_cck); + break; + case RX_MSDU_START_PKT_TYPE_11N: + rx_status->encoding = RX_ENC_HT; + if (rate_mcs > ATH12K_HT_MCS_MAX) { + ath12k_warn(ar->ab, + "Received with invalid mcs in HT mode %d\n", + rate_mcs); + break; + } + rx_status->rate_idx = rate_mcs + (8 * (nss - 1)); + if (sgi) + rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; + rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); + break; + case RX_MSDU_START_PKT_TYPE_11AC: + rx_status->encoding = RX_ENC_VHT; + rx_status->rate_idx = rate_mcs; + if (rate_mcs > ATH12K_VHT_MCS_MAX) { + ath12k_warn(ar->ab, + "Received with invalid mcs in VHT mode %d\n", + rate_mcs); + break; + } + rx_status->nss = nss; + if (sgi) + rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; + rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); + break; + case RX_MSDU_START_PKT_TYPE_11AX: + rx_status->rate_idx = rate_mcs; + if (rate_mcs > ATH12K_HE_MCS_MAX) { + ath12k_warn(ar->ab, + "Received with invalid mcs in HE mode %d\n", + rate_mcs); + break; + } + rx_status->encoding = RX_ENC_HE; + rx_status->nss = nss; + rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi); + rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); + break; + } +} + +void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc, + struct ieee80211_rx_status *rx_status) +{ + struct ath12k_base *ab = ar->ab; + u8 channel_num; + u32 center_freq, meta_data; + struct ieee80211_channel *channel; + + rx_status->freq = 0; + rx_status->rate_idx = 0; + rx_status->nss = 0; + rx_status->encoding = RX_ENC_LEGACY; + rx_status->bw = RATE_INFO_BW_20; + rx_status->enc_flags = 0; + + rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; + + meta_data = ath12k_dp_rx_h_freq(ab, rx_desc); + channel_num = meta_data; + center_freq = meta_data >> 16; + + if (center_freq >= 5935 && center_freq <= 7105) { + rx_status->band = NL80211_BAND_6GHZ; + } else if (channel_num >= 1 && channel_num <= 14) { + rx_status->band = NL80211_BAND_2GHZ; + } else if (channel_num >= 36 && channel_num <= 173) { + rx_status->band = NL80211_BAND_5GHZ; + } else { + spin_lock_bh(&ar->data_lock); + channel = ar->rx_channel; + if (channel) { + 
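/* Descriptor metadata didn't match a known band; fall back
+			 * to the channel currently configured on this pdev.
+			 */
+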
rx_status->band = channel->band;
+			channel_num =
+				ieee80211_frequency_to_channel(channel->center_freq);
+		}
+		spin_unlock_bh(&ar->data_lock);
+		ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "rx_desc: ",
+				rx_desc, sizeof(*rx_desc));
+	}
+
+	rx_status->freq = ieee80211_channel_to_frequency(channel_num,
+							 rx_status->band);
+
+	ath12k_dp_rx_h_rate(ar, rx_desc, rx_status);
+}
+
+static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi,
+				      struct sk_buff *msdu,
+				      struct ieee80211_rx_status *status)
+{
+	struct ath12k_base *ab = ar->ab;
+	static const struct ieee80211_radiotap_he known = {
+		.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+				     IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
+		.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
+	};
+	struct ieee80211_radiotap_he *he;
+	struct ieee80211_rx_status *rx_status;
+	struct ieee80211_sta *pubsta;
+	struct ath12k_peer *peer;
+	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+	u8 decap = DP_RX_DECAP_TYPE_RAW;
+	bool is_mcbc = rxcb->is_mcbc;
+	bool is_eapol = rxcb->is_eapol;
+
+	if (status->encoding == RX_ENC_HE && !(status->flag & RX_FLAG_RADIOTAP_HE) &&
+	    !(status->flag & RX_FLAG_SKIP_MONITOR)) {
+		he = skb_push(msdu, sizeof(known));
+		memcpy(he, &known, sizeof(known));
+		status->flag |= RX_FLAG_RADIOTAP_HE;
+	}
+
+	if (!(status->flag & RX_FLAG_ONLY_MONITOR))
+		decap = ath12k_dp_rx_h_decap_type(ab, rxcb->rx_desc);
+
+	spin_lock_bh(&ab->base_lock);
+	peer = ath12k_dp_rx_h_find_peer(ab, msdu);
+
+	pubsta = peer ? peer->sta : NULL;
+
+	spin_unlock_bh(&ab->base_lock);
+
+	ath12k_dbg(ab, ATH12K_DBG_DATA,
+		   "rx skb %pK len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+		   msdu,
+		   msdu->len,
+		   peer ? peer->addr : NULL,
+		   rxcb->tid,
+		   is_mcbc ? "mcast" : "ucast",
+		   ath12k_dp_rx_h_seq_no(ab, rxcb->rx_desc),
+		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
+		   (status->encoding == RX_ENC_HT) ? "ht" : "",
+		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
+		   (status->encoding == RX_ENC_HE) ? "he" : "",
+		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
+		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
+		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
+		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
+		   status->rate_idx,
+		   status->nss,
+		   status->freq,
+		   status->band, status->flag,
+		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
+		   !!(status->flag & RX_FLAG_MMIC_ERROR),
+		   !!(status->flag & RX_FLAG_AMSDU_MORE));
+
+	ath12k_dbg_dump(ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ",
+			msdu->data, msdu->len);
+
+	rx_status = IEEE80211_SKB_RXCB(msdu);
+	*rx_status = *status;
+
+	/* TODO: trace rx packet */
+
+	/* The PN for multicast packets is not validated in HW,
+	 * so skip the 802.3 rx path for them.
+	 * Also, fast_rx expects the STA to be authorized, hence
+	 * EAPOL packets are sent in the slow path.
+ */ + if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol && + !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED)) + rx_status->flag |= RX_FLAG_8023; + + ieee80211_rx_napi(ar->hw, pubsta, msdu, napi); +} + +static int ath12k_dp_rx_process_msdu(struct ath12k *ar, + struct sk_buff *msdu, + struct sk_buff_head *msdu_list, + struct ieee80211_rx_status *rx_status) +{ + struct ath12k_base *ab = ar->ab; + struct hal_rx_desc *rx_desc, *lrx_desc; + struct ath12k_skb_rxcb *rxcb; + struct sk_buff *last_buf; + u8 l3_pad_bytes; + u16 msdu_len; + int ret; + u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz; + + last_buf = ath12k_dp_rx_get_msdu_last_buf(msdu_list, msdu); + if (!last_buf) { + ath12k_warn(ab, + "No valid Rx buffer to access MSDU_END tlv\n"); + ret = -EIO; + goto free_out; + } + + rx_desc = (struct hal_rx_desc *)msdu->data; + lrx_desc = (struct hal_rx_desc *)last_buf->data; + if (!ath12k_dp_rx_h_msdu_done(ab, lrx_desc)) { + ath12k_warn(ab, "msdu_done bit in msdu_end is not set\n"); + ret = -EIO; + goto free_out; + } + + rxcb = ATH12K_SKB_RXCB(msdu); + rxcb->rx_desc = rx_desc; + msdu_len = ath12k_dp_rx_h_msdu_len(ab, lrx_desc); + l3_pad_bytes = ath12k_dp_rx_h_l3pad(ab, lrx_desc); + + if (rxcb->is_frag) { + skb_pull(msdu, hal_rx_desc_sz); + } else if (!rxcb->is_continuation) { + if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) { + ret = -EINVAL; + ath12k_warn(ab, "invalid msdu len %u\n", msdu_len); + ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", rx_desc, + sizeof(*rx_desc)); + goto free_out; + } + skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len); + skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes); + } else { + ret = ath12k_dp_rx_msdu_coalesce(ar, msdu_list, + msdu, last_buf, + l3_pad_bytes, msdu_len); + if (ret) { + ath12k_warn(ab, + "failed to coalesce msdu rx buffer%d\n", ret); + goto free_out; + } + } + + ath12k_dp_rx_h_ppdu(ar, rx_desc, rx_status); + ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status); + + rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED; + + return 0; + +free_out: + return ret; +} + +static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab, + struct napi_struct *napi, + struct sk_buff_head *msdu_list, + int ring_id) +{ + struct ieee80211_rx_status rx_status = {0}; + struct ath12k_skb_rxcb *rxcb; + struct sk_buff *msdu; + struct ath12k *ar; + u8 mac_id; + int ret; + + if (skb_queue_empty(msdu_list)) + return; + + rcu_read_lock(); + + while ((msdu = __skb_dequeue(msdu_list))) { + rxcb = ATH12K_SKB_RXCB(msdu); + mac_id = rxcb->mac_id; + ar = ab->pdevs[mac_id].ar; + if (!rcu_dereference(ab->pdevs_active[mac_id])) { + dev_kfree_skb_any(msdu); + continue; + } + + if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) { + dev_kfree_skb_any(msdu); + continue; + } + + ret = ath12k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status); + if (ret) { + ath12k_dbg(ab, ATH12K_DBG_DATA, + "Unable to process msdu %d", ret); + dev_kfree_skb_any(msdu); + continue; + } + + ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status); + } + + rcu_read_unlock(); +} + +int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id, + struct napi_struct *napi, int budget) +{ + struct ath12k_rx_desc_info *desc_info; + struct ath12k_dp *dp = &ab->dp; + struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; + struct hal_reo_dest_ring *desc; + int num_buffs_reaped = 0; + struct sk_buff_head msdu_list; + struct ath12k_skb_rxcb *rxcb; + int total_msdu_reaped = 0; + struct hal_srng *srng; + struct sk_buff *msdu; + bool done = false; + int mac_id; + u64 desc_va; + + 
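/* Reap completed entries from the REO destination ring under the
+	 * srng lock; the queued MSDUs are replenished and processed after
+	 * the lock is dropped.
+	 */
+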
__skb_queue_head_init(&msdu_list);
+
+	srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
+
+	spin_lock_bh(&srng->lock);
+
+try_again:
+	ath12k_hal_srng_access_begin(ab, srng);
+
+	while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
+		enum hal_reo_dest_ring_push_reason push_reason;
+		u32 cookie;
+
+		cookie = le32_get_bits(desc->buf_addr_info.info1,
+				       BUFFER_ADDR_INFO1_SW_COOKIE);
+
+		mac_id = le32_get_bits(desc->info0,
+				       HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
+
+		desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
+			   le32_to_cpu(desc->buf_va_lo));
+		desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
+
+		/* retry manual desc retrieval */
+		if (!desc_info) {
+			desc_info = ath12k_dp_get_rx_desc(ab, cookie);
+			if (!desc_info) {
+				ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
+				continue;
+			}
+		}
+
+		if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
+			ath12k_warn(ab, "Check HW CC implementation");
+
+		msdu = desc_info->skb;
+		desc_info->skb = NULL;
+
+		spin_lock_bh(&dp->rx_desc_lock);
+		list_move_tail(&desc_info->list, &dp->rx_desc_free_list);
+		spin_unlock_bh(&dp->rx_desc_lock);
+
+		rxcb = ATH12K_SKB_RXCB(msdu);
+		dma_unmap_single(ab->dev, rxcb->paddr,
+				 msdu->len + skb_tailroom(msdu),
+				 DMA_FROM_DEVICE);
+
+		num_buffs_reaped++;
+
+		push_reason = le32_get_bits(desc->info0,
+					    HAL_REO_DEST_RING_INFO0_PUSH_REASON);
+		if (push_reason !=
+		    HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
+			dev_kfree_skb_any(msdu);
+			ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
+			continue;
+		}
+
+		rxcb->is_first_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
+					 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
+		rxcb->is_last_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
+					RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
+		rxcb->is_continuation = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
+					   RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
+		rxcb->mac_id = mac_id;
+		rxcb->peer_id = le32_get_bits(desc->rx_mpdu_info.peer_meta_data,
+					      RX_MPDU_DESC_META_DATA_PEER_ID);
+		rxcb->tid = le32_get_bits(desc->rx_mpdu_info.info0,
+					  RX_MPDU_DESC_INFO0_TID);
+
+		__skb_queue_tail(&msdu_list, msdu);
+
+		if (!rxcb->is_continuation) {
+			total_msdu_reaped++;
+			done = true;
+		} else {
+			done = false;
+		}
+
+		if (total_msdu_reaped >= budget)
+			break;
+	}
+
+	/* HW might have updated the head pointer after we cached it.
+	 * In this case, even though there are entries in the ring we'll
+	 * get rx_desc NULL. Give the read another try with the updated
+	 * cached head pointer so that we can reap the complete MPDU in
+	 * the current rx processing.
+	 */
+	if (!done && ath12k_hal_srng_dst_num_free(ab, srng, true)) {
+		ath12k_hal_srng_access_end(ab, srng);
+		goto try_again;
+	}
+
+	ath12k_hal_srng_access_end(ab, srng);
+
+	spin_unlock_bh(&srng->lock);
+
+	if (!total_msdu_reaped)
+		goto exit;
+
+	/* TODO: Move to implicit BM?
*/ + ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_buffs_reaped, + ab->hw_params->hal_params->rx_buf_rbm, true); + + ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list, + ring_id); + +exit: + return total_msdu_reaped; +} + +static void ath12k_dp_rx_frag_timer(struct timer_list *timer) +{ + struct ath12k_dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer); + + spin_lock_bh(&rx_tid->ab->base_lock); + if (rx_tid->last_frag_no && + rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) { + spin_unlock_bh(&rx_tid->ab->base_lock); + return; + } + ath12k_dp_rx_frags_cleanup(rx_tid, true); + spin_unlock_bh(&rx_tid->ab->base_lock); +} + +int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id) +{ + struct ath12k_base *ab = ar->ab; + struct crypto_shash *tfm; + struct ath12k_peer *peer; + struct ath12k_dp_rx_tid *rx_tid; + int i; + + tfm = crypto_alloc_shash("michael_mic", 0, 0); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + spin_lock_bh(&ab->base_lock); + + peer = ath12k_peer_find(ab, vdev_id, peer_mac); + if (!peer) { + spin_unlock_bh(&ab->base_lock); + ath12k_warn(ab, "failed to find the peer to set up fragment info\n"); + return -ENOENT; + } + + for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { + rx_tid = &peer->rx_tid[i]; + rx_tid->ab = ab; + timer_setup(&rx_tid->frag_timer, ath12k_dp_rx_frag_timer, 0); + skb_queue_head_init(&rx_tid->rx_frags); + } + + peer->tfm_mmic = tfm; + spin_unlock_bh(&ab->base_lock); + + return 0; +} + +static int ath12k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key, + struct ieee80211_hdr *hdr, u8 *data, + size_t data_len, u8 *mic) +{ + SHASH_DESC_ON_STACK(desc, tfm); + u8 mic_hdr[16] = {0}; + u8 tid = 0; + int ret; + + if (!tfm) + return -EINVAL; + + desc->tfm = tfm; + + ret = crypto_shash_setkey(tfm, key, 8); + if (ret) + goto out; + + ret = crypto_shash_init(desc); + if (ret) + goto out; + + /* TKIP MIC header */ + memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN); + memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN); + if (ieee80211_is_data_qos(hdr->frame_control)) + tid = ieee80211_get_tid(hdr); + mic_hdr[12] = tid; + + ret = crypto_shash_update(desc, mic_hdr, 16); + if (ret) + goto out; + ret = crypto_shash_update(desc, data, data_len); + if (ret) + goto out; + ret = crypto_shash_final(desc, mic); +out: + shash_desc_zero(desc); + return ret; +} + +static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer *peer, + struct sk_buff *msdu) +{ + struct ath12k_base *ab = ar->ab; + struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data; + struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu); + struct ieee80211_key_conf *key_conf; + struct ieee80211_hdr *hdr; + u8 mic[IEEE80211_CCMP_MIC_LEN]; + int head_len, tail_len, ret; + size_t data_len; + u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz; + u8 *key, *data; + u8 key_idx; + + if (ath12k_dp_rx_h_enctype(ab, rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC) + return 0; + + hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz); + hdr_len = ieee80211_hdrlen(hdr->frame_control); + head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN; + tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN; + + if (!is_multicast_ether_addr(hdr->addr1)) + key_idx = peer->ucast_keyidx; + else + key_idx = peer->mcast_keyidx; + + key_conf = peer->keys[key_idx]; + + data = msdu->data + head_len; + data_len = msdu->len - head_len - tail_len; + key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; + + ret = 
ath12k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic); + if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN)) + goto mic_fail; + + return 0; + +mic_fail: + (ATH12K_SKB_RXCB(msdu))->is_first_msdu = true; + (ATH12K_SKB_RXCB(msdu))->is_last_msdu = true; + + rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED | + RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED; + skb_pull(msdu, hal_rx_desc_sz); + + ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs); + ath12k_dp_rx_h_undecap(ar, msdu, rx_desc, + HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true); + ieee80211_rx(ar->hw, msdu); + return -EINVAL; +} + +static void ath12k_dp_rx_h_undecap_frag(struct ath12k *ar, struct sk_buff *msdu, + enum hal_encrypt_type enctype, u32 flags) +{ + struct ieee80211_hdr *hdr; + size_t hdr_len; + size_t crypto_len; + u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz; + + if (!flags) + return; + + hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz); + + if (flags & RX_FLAG_MIC_STRIPPED) + skb_trim(msdu, msdu->len - + ath12k_dp_rx_crypto_mic_len(ar, enctype)); + + if (flags & RX_FLAG_ICV_STRIPPED) + skb_trim(msdu, msdu->len - + ath12k_dp_rx_crypto_icv_len(ar, enctype)); + + if (flags & RX_FLAG_IV_STRIPPED) { + hdr_len = ieee80211_hdrlen(hdr->frame_control); + crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype); + + memmove(msdu->data + hal_rx_desc_sz + crypto_len, + msdu->data + hal_rx_desc_sz, hdr_len); + skb_pull(msdu, crypto_len); + } +} + +static int ath12k_dp_rx_h_defrag(struct ath12k *ar, + struct ath12k_peer *peer, + struct ath12k_dp_rx_tid *rx_tid, + struct sk_buff **defrag_skb) +{ + struct ath12k_base *ab = ar->ab; + struct hal_rx_desc *rx_desc; + struct sk_buff *skb, *first_frag, *last_frag; + struct ieee80211_hdr *hdr; + enum hal_encrypt_type enctype; + bool is_decrypted = false; + int msdu_len = 0; + int extra_space; + u32 flags, hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz; + + first_frag = skb_peek(&rx_tid->rx_frags); + last_frag = skb_peek_tail(&rx_tid->rx_frags); + + skb_queue_walk(&rx_tid->rx_frags, skb) { + flags = 0; + rx_desc = (struct hal_rx_desc *)skb->data; + hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); + + enctype = ath12k_dp_rx_h_enctype(ab, rx_desc); + if (enctype != HAL_ENCRYPT_TYPE_OPEN) + is_decrypted = ath12k_dp_rx_h_is_decrypted(ab, + rx_desc); + + if (is_decrypted) { + if (skb != first_frag) + flags |= RX_FLAG_IV_STRIPPED; + if (skb != last_frag) + flags |= RX_FLAG_ICV_STRIPPED | + RX_FLAG_MIC_STRIPPED; + } + + /* RX fragments are always raw packets */ + if (skb != last_frag) + skb_trim(skb, skb->len - FCS_LEN); + ath12k_dp_rx_h_undecap_frag(ar, skb, enctype, flags); + + if (skb != first_frag) + skb_pull(skb, hal_rx_desc_sz + + ieee80211_hdrlen(hdr->frame_control)); + msdu_len += skb->len; + } + + extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag)); + if (extra_space > 0 && + (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0)) + return -ENOMEM; + + __skb_unlink(first_frag, &rx_tid->rx_frags); + while ((skb = __skb_dequeue(&rx_tid->rx_frags))) { + skb_put_data(first_frag, skb->data, skb->len); + dev_kfree_skb_any(skb); + } + + hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz); + hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); + ATH12K_SKB_RXCB(first_frag)->is_frag = 1; + + if (ath12k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag)) + first_frag = NULL; + + *defrag_skb = first_frag; + return 0; +} + +static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar, + struct ath12k_dp_rx_tid 
*rx_tid,
+					      struct sk_buff *defrag_skb)
+{
+	struct ath12k_base *ab = ar->ab;
+	struct ath12k_dp *dp = &ab->dp;
+	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
+	struct hal_reo_entrance_ring *reo_ent_ring;
+	struct hal_reo_dest_ring *reo_dest_ring;
+	struct dp_link_desc_bank *link_desc_banks;
+	struct hal_rx_msdu_link *msdu_link;
+	struct hal_rx_msdu_details *msdu0;
+	struct hal_srng *srng;
+	dma_addr_t link_paddr, buf_paddr;
+	u32 desc_bank, msdu_info, msdu_ext_info, mpdu_info;
+	u32 cookie, hal_rx_desc_sz, dest_ring_info0;
+	int ret;
+	struct ath12k_rx_desc_info *desc_info;
+	u8 dst_ind;
+
+	hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
+	link_desc_banks = dp->link_desc_banks;
+	reo_dest_ring = rx_tid->dst_ring_desc;
+
+	ath12k_hal_rx_reo_ent_paddr_get(ab, &reo_dest_ring->buf_addr_info,
+					&link_paddr, &cookie);
+	desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK);
+
+	msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
+			(link_paddr - link_desc_banks[desc_bank].paddr));
+	msdu0 = &msdu_link->msdu_link[0];
+	msdu_ext_info = le32_to_cpu(msdu0->rx_msdu_ext_info.info0);
+	dst_ind = u32_get_bits(msdu_ext_info, RX_MSDU_EXT_DESC_INFO0_REO_DEST_IND);
+
+	memset(msdu0, 0, sizeof(*msdu0));
+
+	msdu_info = u32_encode_bits(1, RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU) |
+		    u32_encode_bits(1, RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU) |
+		    u32_encode_bits(0, RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) |
+		    u32_encode_bits(defrag_skb->len - hal_rx_desc_sz,
+				    RX_MSDU_DESC_INFO0_MSDU_LENGTH) |
+		    u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_SA) |
+		    u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_DA);
+	msdu0->rx_msdu_info.info0 = cpu_to_le32(msdu_info);
+	msdu0->rx_msdu_ext_info.info0 = cpu_to_le32(msdu_ext_info);
+
+	/* change msdu len in hal rx desc */
+	ath12k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);
+
+	buf_paddr = dma_map_single(ab->dev, defrag_skb->data,
+				   defrag_skb->len + skb_tailroom(defrag_skb),
+				   DMA_FROM_DEVICE);
+	if (dma_mapping_error(ab->dev, buf_paddr))
+		return -ENOMEM;
+
+	spin_lock_bh(&dp->rx_desc_lock);
+	desc_info = list_first_entry_or_null(&dp->rx_desc_free_list,
+					     struct ath12k_rx_desc_info,
+					     list);
+	if (!desc_info) {
+		spin_unlock_bh(&dp->rx_desc_lock);
+		ath12k_warn(ab, "failed to find rx desc for reinject\n");
+		ret = -ENOMEM;
+		goto err_unmap_dma;
+	}
+
+	desc_info->skb = defrag_skb;
+
+	list_del(&desc_info->list);
+	list_add_tail(&desc_info->list, &dp->rx_desc_used_list);
+	spin_unlock_bh(&dp->rx_desc_lock);
+
+	ATH12K_SKB_RXCB(defrag_skb)->paddr = buf_paddr;
+
+	ath12k_hal_rx_buf_addr_info_set(&msdu0->buf_addr_info, buf_paddr,
+					desc_info->cookie,
+					HAL_RX_BUF_RBM_SW3_BM);
+
+	/* Fill mpdu details into reo entrance ring */
+	srng = &ab->hal.srng_list[dp->reo_reinject_ring.ring_id];
+
+	spin_lock_bh(&srng->lock);
+	ath12k_hal_srng_access_begin(ab, srng);
+
+	reo_ent_ring = ath12k_hal_srng_src_get_next_entry(ab, srng);
+	if (!reo_ent_ring) {
+		ath12k_hal_srng_access_end(ab, srng);
+		spin_unlock_bh(&srng->lock);
+		ret = -ENOSPC;
+		goto err_free_desc;
+	}
+	memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));
+
+	ath12k_hal_rx_buf_addr_info_set(&reo_ent_ring->buf_addr_info, link_paddr,
+					cookie,
+					HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST);
+
+	mpdu_info = u32_encode_bits(1, RX_MPDU_DESC_INFO0_MSDU_COUNT) |
+		    u32_encode_bits(0, RX_MPDU_DESC_INFO0_FRAG_FLAG) |
+		    u32_encode_bits(1, RX_MPDU_DESC_INFO0_RAW_MPDU) |
+		    u32_encode_bits(1, RX_MPDU_DESC_INFO0_VALID_PN) |
+		    u32_encode_bits(rx_tid->tid, RX_MPDU_DESC_INFO0_TID);
+
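+	/* The entrance descriptor points back at the TID hw queue so that
+	 * the reinjected MPDU is subject to REO reordering and PN checks
+	 * again.
+	 */
+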
reo_ent_ring->rx_mpdu_info.info0 = cpu_to_le32(mpdu_info); + reo_ent_ring->rx_mpdu_info.peer_meta_data = + reo_dest_ring->rx_mpdu_info.peer_meta_data; + + reo_ent_ring->queue_addr_lo = cpu_to_le32(lower_32_bits(rx_tid->paddr)); + reo_ent_ring->info0 = le32_encode_bits(upper_32_bits(rx_tid->paddr), + HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI) | + le32_encode_bits(dst_ind, HAL_REO_ENTR_RING_INFO0_DEST_IND); + + reo_ent_ring->info1 = le32_encode_bits(rx_tid->cur_sn, + HAL_REO_ENTR_RING_INFO1_MPDU_SEQ_NUM); + dest_ring_info0 = le32_get_bits(reo_dest_ring->info0, + HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); + reo_ent_ring->info2 = + cpu_to_le32(u32_get_bits(dest_ring_info0, + HAL_REO_ENTR_RING_INFO2_SRC_LINK_ID)); + + ath12k_hal_srng_access_end(ab, srng); + spin_unlock_bh(&srng->lock); + + return 0; + +err_free_desc: + spin_lock_bh(&dp->rx_desc_lock); + list_del(&desc_info->list); + list_add_tail(&desc_info->list, &dp->rx_desc_free_list); + desc_info->skb = NULL; + spin_unlock_bh(&dp->rx_desc_lock); +err_unmap_dma: + dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb), + DMA_FROM_DEVICE); + return ret; +} + +static int ath12k_dp_rx_h_cmp_frags(struct ath12k_base *ab, + struct sk_buff *a, struct sk_buff *b) +{ + int frag1, frag2; + + frag1 = ath12k_dp_rx_h_frag_no(ab, a); + frag2 = ath12k_dp_rx_h_frag_no(ab, b); + + return frag1 - frag2; +} + +static void ath12k_dp_rx_h_sort_frags(struct ath12k_base *ab, + struct sk_buff_head *frag_list, + struct sk_buff *cur_frag) +{ + struct sk_buff *skb; + int cmp; + + skb_queue_walk(frag_list, skb) { + cmp = ath12k_dp_rx_h_cmp_frags(ab, skb, cur_frag); + if (cmp < 0) + continue; + __skb_queue_before(frag_list, skb, cur_frag); + return; + } + __skb_queue_tail(frag_list, cur_frag); +} + +static u64 ath12k_dp_rx_h_get_pn(struct ath12k *ar, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr; + u64 pn = 0; + u8 *ehdr; + u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz; + + hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); + ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control); + + pn = ehdr[0]; + pn |= (u64)ehdr[1] << 8; + pn |= (u64)ehdr[4] << 16; + pn |= (u64)ehdr[5] << 24; + pn |= (u64)ehdr[6] << 32; + pn |= (u64)ehdr[7] << 40; + + return pn; +} + +static bool +ath12k_dp_rx_h_defrag_validate_incr_pn(struct ath12k *ar, struct ath12k_dp_rx_tid *rx_tid) +{ + struct ath12k_base *ab = ar->ab; + enum hal_encrypt_type encrypt_type; + struct sk_buff *first_frag, *skb; + struct hal_rx_desc *desc; + u64 last_pn; + u64 cur_pn; + + first_frag = skb_peek(&rx_tid->rx_frags); + desc = (struct hal_rx_desc *)first_frag->data; + + encrypt_type = ath12k_dp_rx_h_enctype(ab, desc); + if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 && + encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 && + encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 && + encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256) + return true; + + last_pn = ath12k_dp_rx_h_get_pn(ar, first_frag); + skb_queue_walk(&rx_tid->rx_frags, skb) { + if (skb == first_frag) + continue; + + cur_pn = ath12k_dp_rx_h_get_pn(ar, skb); + if (cur_pn != last_pn + 1) + return false; + last_pn = cur_pn; + } + return true; +} + +static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar, + struct sk_buff *msdu, + struct hal_reo_dest_ring *ring_desc) +{ + struct ath12k_base *ab = ar->ab; + struct hal_rx_desc *rx_desc; + struct ath12k_peer *peer; + struct ath12k_dp_rx_tid *rx_tid; + struct sk_buff *defrag_skb = NULL; + u32 peer_id; + u16 seqno, frag_no; + u8 tid; + int ret = 0; + bool more_frags; + + rx_desc = 
(struct hal_rx_desc *)msdu->data; + peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc); + tid = ath12k_dp_rx_h_tid(ab, rx_desc); + seqno = ath12k_dp_rx_h_seq_no(ab, rx_desc); + frag_no = ath12k_dp_rx_h_frag_no(ab, msdu); + more_frags = ath12k_dp_rx_h_more_frags(ab, msdu); + + if (!ath12k_dp_rx_h_seq_ctrl_valid(ab, rx_desc) || + !ath12k_dp_rx_h_fc_valid(ab, rx_desc) || + tid > IEEE80211_NUM_TIDS) + return -EINVAL; + + /* received unfragmented packet in reo + * exception ring; this shouldn't happen, + * as these packets typically come from + * reo2sw srngs. + */ + if (WARN_ON_ONCE(!frag_no && !more_frags)) + return -EINVAL; + + spin_lock_bh(&ab->base_lock); + peer = ath12k_peer_find_by_id(ab, peer_id); + if (!peer) { + ath12k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n", + peer_id); + ret = -ENOENT; + goto out_unlock; + } + rx_tid = &peer->rx_tid[tid]; + + if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) || + skb_queue_empty(&rx_tid->rx_frags)) { + /* Flush stored fragments and start a new sequence */ + ath12k_dp_rx_frags_cleanup(rx_tid, true); + rx_tid->cur_sn = seqno; + } + + if (rx_tid->rx_frag_bitmap & BIT(frag_no)) { + /* Fragment already present */ + ret = -EINVAL; + goto out_unlock; + } + + if (frag_no > __fls(rx_tid->rx_frag_bitmap)) + __skb_queue_tail(&rx_tid->rx_frags, msdu); + else + ath12k_dp_rx_h_sort_frags(ab, &rx_tid->rx_frags, msdu); + + rx_tid->rx_frag_bitmap |= BIT(frag_no); + if (!more_frags) + rx_tid->last_frag_no = frag_no; + + if (frag_no == 0) { + rx_tid->dst_ring_desc = kmemdup(ring_desc, + sizeof(*rx_tid->dst_ring_desc), + GFP_ATOMIC); + if (!rx_tid->dst_ring_desc) { + ret = -ENOMEM; + goto out_unlock; + } + } else { + ath12k_dp_rx_link_desc_return(ab, ring_desc, + HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); + } + + if (!rx_tid->last_frag_no || + rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) { + mod_timer(&rx_tid->frag_timer, jiffies + + ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS); + goto out_unlock; + } + + spin_unlock_bh(&ab->base_lock); + del_timer_sync(&rx_tid->frag_timer); + spin_lock_bh(&ab->base_lock); + + peer = ath12k_peer_find_by_id(ab, peer_id); + if (!peer) + goto err_frags_cleanup; + + if (!ath12k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid)) + goto err_frags_cleanup; + + if (ath12k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb)) + goto err_frags_cleanup; + + if (!defrag_skb) + goto err_frags_cleanup; + + if (ath12k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb)) + goto err_frags_cleanup; + + ath12k_dp_rx_frags_cleanup(rx_tid, false); + goto out_unlock; + +err_frags_cleanup: + dev_kfree_skb_any(defrag_skb); + ath12k_dp_rx_frags_cleanup(rx_tid, true); +out_unlock: + spin_unlock_bh(&ab->base_lock); + return ret; +} + +static int +ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc, + bool drop, u32 cookie) +{ + struct ath12k_base *ab = ar->ab; + struct sk_buff *msdu; + struct ath12k_skb_rxcb *rxcb; + struct hal_rx_desc *rx_desc; + u16 msdu_len; + u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz; + struct ath12k_rx_desc_info *desc_info; + u64 desc_va; + + desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 | + le32_to_cpu(desc->buf_va_lo)); + desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va); + + /* retry manual desc retrieval */ + if (!desc_info) { + desc_info = ath12k_dp_get_rx_desc(ab, cookie); + if (!desc_info) { + ath12k_warn(ab, "Invalid cookie in manual desc retrieval"); + return -EINVAL; + } + } + + if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC) + 
ath12k_warn(ab, "RX Exception, Check HW CC implementation"); + + msdu = desc_info->skb; + desc_info->skb = NULL; + spin_lock_bh(&ab->dp.rx_desc_lock); + list_move_tail(&desc_info->list, &ab->dp.rx_desc_free_list); + spin_unlock_bh(&ab->dp.rx_desc_lock); + + rxcb = ATH12K_SKB_RXCB(msdu); + dma_unmap_single(ar->ab->dev, rxcb->paddr, + msdu->len + skb_tailroom(msdu), + DMA_FROM_DEVICE); + + if (drop) { + dev_kfree_skb_any(msdu); + return 0; + } + + rcu_read_lock(); + if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) { + dev_kfree_skb_any(msdu); + goto exit; + } + + if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) { + dev_kfree_skb_any(msdu); + goto exit; + } + + rx_desc = (struct hal_rx_desc *)msdu->data; + msdu_len = ath12k_dp_rx_h_msdu_len(ar->ab, rx_desc); + if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) { + ath12k_warn(ar->ab, "invalid msdu len %u", msdu_len); + ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "", rx_desc, + sizeof(*rx_desc)); + dev_kfree_skb_any(msdu); + goto exit; + } + + skb_put(msdu, hal_rx_desc_sz + msdu_len); + + if (ath12k_dp_rx_frag_h_mpdu(ar, msdu, desc)) { + dev_kfree_skb_any(msdu); + ath12k_dp_rx_link_desc_return(ar->ab, desc, + HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); + } +exit: + rcu_read_unlock(); + return 0; +} + +int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, + int budget) +{ + u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; + struct dp_link_desc_bank *link_desc_banks; + enum hal_rx_buf_return_buf_manager rbm; + struct hal_rx_msdu_link *link_desc_va; + int tot_n_bufs_reaped, quota, ret, i; + struct hal_reo_dest_ring *reo_desc; + struct dp_rxdma_ring *rx_ring; + struct dp_srng *reo_except; + u32 desc_bank, num_msdus; + struct hal_srng *srng; + struct ath12k_dp *dp; + int mac_id; + struct ath12k *ar; + dma_addr_t paddr; + bool is_frag; + bool drop = false; + + tot_n_bufs_reaped = 0; + quota = budget; + + dp = &ab->dp; + reo_except = &dp->reo_except_ring; + link_desc_banks = dp->link_desc_banks; + + srng = &ab->hal.srng_list[reo_except->ring_id]; + + spin_lock_bh(&srng->lock); + + ath12k_hal_srng_access_begin(ab, srng); + + while (budget && + (reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) { + ab->soc_stats.err_ring_pkts++; + ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr, + &desc_bank); + if (ret) { + ath12k_warn(ab, "failed to parse error reo desc %d\n", + ret); + continue; + } + link_desc_va = link_desc_banks[desc_bank].vaddr + + (paddr - link_desc_banks[desc_bank].paddr); + ath12k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies, + &rbm); + if (rbm != HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST && + rbm != HAL_RX_BUF_RBM_SW3_BM && + rbm != ab->hw_params->hal_params->rx_buf_rbm) { + ab->soc_stats.invalid_rbm++; + ath12k_warn(ab, "invalid return buffer manager %d\n", rbm); + ath12k_dp_rx_link_desc_return(ab, reo_desc, + HAL_WBM_REL_BM_ACT_REL_MSDU); + continue; + } + + is_frag = !!(le32_to_cpu(reo_desc->rx_mpdu_info.info0) & + RX_MPDU_DESC_INFO0_FRAG_FLAG); + + /* Process only rx fragments with one msdu per link desc below, and drop + * msdus indicated due to error reasons. 
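 + * A well-formed rx fragment is expected to carry exactly one msdu in + * its link descriptor; anything else is dropped here and its buffers + * returned to the idle list below. 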
+ */ + if (!is_frag || num_msdus > 1) { + drop = true; + /* Return the link desc back to wbm idle list */ + ath12k_dp_rx_link_desc_return(ab, reo_desc, + HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); + } + + for (i = 0; i < num_msdus; i++) { + mac_id = le32_get_bits(reo_desc->info0, + HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); + + ar = ab->pdevs[mac_id].ar; + + if (!ath12k_dp_process_rx_err_buf(ar, reo_desc, drop, + msdu_cookies[i])) + tot_n_bufs_reaped++; + } + + if (tot_n_bufs_reaped >= quota) { + tot_n_bufs_reaped = quota; + goto exit; + } + + budget = quota - tot_n_bufs_reaped; + } + +exit: + ath12k_hal_srng_access_end(ab, srng); + + spin_unlock_bh(&srng->lock); + + rx_ring = &dp->rx_refill_buf_ring; + + ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, tot_n_bufs_reaped, + ab->hw_params->hal_params->rx_buf_rbm, true); + + return tot_n_bufs_reaped; +} + +static void ath12k_dp_rx_null_q_desc_sg_drop(struct ath12k *ar, + int msdu_len, + struct sk_buff_head *msdu_list) +{ + struct sk_buff *skb, *tmp; + struct ath12k_skb_rxcb *rxcb; + int n_buffs; + + n_buffs = DIV_ROUND_UP(msdu_len, + (DP_RX_BUFFER_SIZE - ar->ab->hw_params->hal_desc_sz)); + + skb_queue_walk_safe(msdu_list, skb, tmp) { + rxcb = ATH12K_SKB_RXCB(skb); + if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO && + rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) { + if (!n_buffs) + break; + __skb_unlink(skb, msdu_list); + dev_kfree_skb_any(skb); + n_buffs--; + } + } +} + +static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu, + struct ieee80211_rx_status *status, + struct sk_buff_head *msdu_list) +{ + struct ath12k_base *ab = ar->ab; + u16 msdu_len, peer_id; + struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; + u8 l3pad_bytes; + struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); + u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz; + + msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc); + peer_id = ath12k_dp_rx_h_peer_id(ab, desc); + + if (!ath12k_peer_find_by_id(ab, peer_id)) { + ath12k_dbg(ab, ATH12K_DBG_DATA, "invalid peer id received in wbm err pkt %d\n", + peer_id); + return -EINVAL; + } + + if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) { + /* First buffer will be freed by the caller, so deduct its length */ + msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz); + ath12k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list); + return -EINVAL; + } + + /* Even after cleaning up the sg buffers in the msdu list with above check + * any msdu received with continuation flag needs to be dropped as invalid. + * This protects against some random err frame with continuation flag. + */ + if (rxcb->is_continuation) + return -EINVAL; + + if (!ath12k_dp_rx_h_msdu_done(ab, desc)) { + ath12k_warn(ar->ab, + "msdu_done bit not set in null_q_des processing\n"); + __skb_queue_purge(msdu_list); + return -EIO; + } + + /* Handle NULL queue descriptor violations arising out of a missing + * REO queue for a given peer or a given TID. This typically + * may happen if a packet is received on a QOS enabled TID before the + * ADDBA negotiation for that TID, when the TID queue is setup. Or + * it may also happen for MC/BC frames if they are not routed to the + * non-QOS TID queue, in the absence of any other default TID queue. + * This error can show up in both the REO destination and WBM release rings. 
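 + * The msdu itself is still intact in this case, so after the checks + * above it is trimmed below and delivered through the regular rx path. 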
+ */ + + if (rxcb->is_frag) { + skb_pull(msdu, hal_rx_desc_sz); + } else { + l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc); + + if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) + return -EINVAL; + + skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len); + skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes); + } + ath12k_dp_rx_h_ppdu(ar, desc, status); + + ath12k_dp_rx_h_mpdu(ar, msdu, desc, status); + + rxcb->tid = ath12k_dp_rx_h_tid(ab, desc); + + /* Please note that the caller will have access to the msdu and will + * complete rx with mac80211. There is no need to worry about cleaning + * up amsdu_list. + */ + + return 0; +} + +static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu, + struct ieee80211_rx_status *status, + struct sk_buff_head *msdu_list) +{ + struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); + bool drop = false; + + ar->ab->soc_stats.reo_error[rxcb->err_code]++; + + switch (rxcb->err_code) { + case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO: + if (ath12k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list)) + drop = true; + break; + case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED: + /* TODO: Do not drop PN failed packets in the driver; + * instead, it is better to drop such packets in mac80211 + * after incrementing the replay counters. + */ + fallthrough; + default: + /* TODO: Review other errors and process them to mac80211 + * as appropriate. + */ + drop = true; + break; + } + + return drop; +} + +static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu, + struct ieee80211_rx_status *status) +{ + struct ath12k_base *ab = ar->ab; + u16 msdu_len; + struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; + u8 l3pad_bytes; + struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); + u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz; + + rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, desc); + rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, desc); + + l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc); + msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc); + skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len); + skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes); + + ath12k_dp_rx_h_ppdu(ar, desc, status); + + status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR | + RX_FLAG_DECRYPTED); + + ath12k_dp_rx_h_undecap(ar, msdu, desc, + HAL_ENCRYPT_TYPE_TKIP_MIC, status, false); +} + +static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu, + struct ieee80211_rx_status *status) +{ + struct ath12k_base *ab = ar->ab; + struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); + struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data; + bool drop = false; + u32 err_bitmap; + + ar->ab->soc_stats.rxdma_error[rxcb->err_code]++; + + switch (rxcb->err_code) { + case HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR: + case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR: + err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc); + if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) { + ath12k_dp_rx_h_tkip_mic_err(ar, msdu, status); + break; + } + fallthrough; + default: + /* TODO: Review other rxdma error codes to check if anything is + * worth reporting to mac80211 + */ + drop = true; + break; + } + + return drop; +} + +static void ath12k_dp_rx_wbm_err(struct ath12k *ar, + struct napi_struct *napi, + struct sk_buff *msdu, + struct sk_buff_head *msdu_list) +{ + struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); + struct ieee80211_rx_status rxs = {0}; + bool drop = true; + + switch (rxcb->err_rel_src) { + case HAL_WBM_REL_SRC_MODULE_REO: + drop = 
ath12k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list); + break; + case HAL_WBM_REL_SRC_MODULE_RXDMA: + drop = ath12k_dp_rx_h_rxdma_err(ar, msdu, &rxs); + break; + default: + /* msdu will get freed */ + break; + } + + if (drop) { + dev_kfree_skb_any(msdu); + return; + } + + ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs); +} + +int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab, + struct napi_struct *napi, int budget) +{ + struct ath12k *ar; + struct ath12k_dp *dp = &ab->dp; + struct dp_rxdma_ring *rx_ring; + struct hal_rx_wbm_rel_info err_info; + struct hal_srng *srng; + struct sk_buff *msdu; + struct sk_buff_head msdu_list[MAX_RADIOS]; + struct ath12k_skb_rxcb *rxcb; + void *rx_desc; + int mac_id; + int num_buffs_reaped = 0; + struct ath12k_rx_desc_info *desc_info; + int ret, i; + + for (i = 0; i < ab->num_radios; i++) + __skb_queue_head_init(&msdu_list[i]); + + srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id]; + rx_ring = &dp->rx_refill_buf_ring; + + spin_lock_bh(&srng->lock); + + ath12k_hal_srng_access_begin(ab, srng); + + while (budget) { + rx_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng); + if (!rx_desc) + break; + + ret = ath12k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info); + if (ret) { + ath12k_warn(ab, + "failed to parse rx error in wbm_rel ring desc %d\n", + ret); + continue; + } + + desc_info = (struct ath12k_rx_desc_info *)err_info.rx_desc; + + /* retry manual desc retrieval if hw cc is not done */ + if (!desc_info) { + desc_info = ath12k_dp_get_rx_desc(ab, err_info.cookie); + if (!desc_info) { + ath12k_warn(ab, "Invalid cookie in manual desc retrieval"); + continue; + } + } + + /* FIXME: Extract mac id correctly. Since descs are not tied + * to mac, we can extract from vdev id in ring desc. + */ + mac_id = 0; + + if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC) + ath12k_warn(ab, "WBM RX err, Check HW CC implementation"); + + msdu = desc_info->skb; + desc_info->skb = NULL; + + spin_lock_bh(&dp->rx_desc_lock); + list_move_tail(&desc_info->list, &dp->rx_desc_free_list); + spin_unlock_bh(&dp->rx_desc_lock); + + rxcb = ATH12K_SKB_RXCB(msdu); + dma_unmap_single(ab->dev, rxcb->paddr, + msdu->len + skb_tailroom(msdu), + DMA_FROM_DEVICE); + + num_buffs_reaped++; + + if (!err_info.continuation) + budget--; + + if (err_info.push_reason != + HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { + dev_kfree_skb_any(msdu); + continue; + } + + rxcb->err_rel_src = err_info.err_rel_src; + rxcb->err_code = err_info.err_code; + rxcb->rx_desc = (struct hal_rx_desc *)msdu->data; + __skb_queue_tail(&msdu_list[mac_id], msdu); + + rxcb->is_first_msdu = err_info.first_msdu; + rxcb->is_last_msdu = err_info.last_msdu; + rxcb->is_continuation = err_info.continuation; + } + + ath12k_hal_srng_access_end(ab, srng); + + spin_unlock_bh(&srng->lock); + + if (!num_buffs_reaped) + goto done; + + ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_buffs_reaped, + ab->hw_params->hal_params->rx_buf_rbm, true); + + rcu_read_lock(); + for (i = 0; i < ab->num_radios; i++) { + if (!rcu_dereference(ab->pdevs_active[i])) { + __skb_queue_purge(&msdu_list[i]); + continue; + } + + ar = ab->pdevs[i].ar; + + if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) { + __skb_queue_purge(&msdu_list[i]); + continue; + } + + while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL) + ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]); + } + rcu_read_unlock(); +done: + return num_buffs_reaped; +} + +void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab) +{ + struct ath12k_dp *dp = &ab->dp; + struct hal_tlv_64_hdr *hdr; + struct hal_srng 
*srng; + struct ath12k_dp_rx_reo_cmd *cmd, *tmp; + bool found = false; + u16 tag; + struct hal_reo_status reo_status; + + srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id]; + + memset(&reo_status, 0, sizeof(reo_status)); + + spin_lock_bh(&srng->lock); + + ath12k_hal_srng_access_begin(ab, srng); + + while ((hdr = ath12k_hal_srng_dst_get_next_entry(ab, srng))) { + tag = u64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG); + + switch (tag) { + case HAL_REO_GET_QUEUE_STATS_STATUS: + ath12k_hal_reo_status_queue_stats(ab, hdr, + &reo_status); + break; + case HAL_REO_FLUSH_QUEUE_STATUS: + ath12k_hal_reo_flush_queue_status(ab, hdr, + &reo_status); + break; + case HAL_REO_FLUSH_CACHE_STATUS: + ath12k_hal_reo_flush_cache_status(ab, hdr, + &reo_status); + break; + case HAL_REO_UNBLOCK_CACHE_STATUS: + ath12k_hal_reo_unblk_cache_status(ab, hdr, + &reo_status); + break; + case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS: + ath12k_hal_reo_flush_timeout_list_status(ab, hdr, + &reo_status); + break; + case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS: + ath12k_hal_reo_desc_thresh_reached_status(ab, hdr, + &reo_status); + break; + case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS: + ath12k_hal_reo_update_rx_reo_queue_status(ab, hdr, + &reo_status); + break; + default: + ath12k_warn(ab, "Unknown reo status type %d\n", tag); + continue; + } + + spin_lock_bh(&dp->reo_cmd_lock); + list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { + if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) { + found = true; + list_del(&cmd->list); + break; + } + } + spin_unlock_bh(&dp->reo_cmd_lock); + + if (found) { + cmd->handler(dp, (void *)&cmd->data, + reo_status.uniform_hdr.cmd_status); + kfree(cmd); + } + + found = false; + } + + ath12k_hal_srng_access_end(ab, srng); + + spin_unlock_bh(&srng->lock); +} + +void ath12k_dp_rx_free(struct ath12k_base *ab) +{ + struct ath12k_dp *dp = &ab->dp; + int i; + + ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring); + + for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) { + if (ab->hw_params->rx_mac_buf_ring) + ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]); + } + + for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) + ath12k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]); + + ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring); + ath12k_dp_srng_cleanup(ab, &dp->tx_mon_buf_ring.refill_buf_ring); + + ath12k_dp_rxdma_buf_free(ab); +} + +void ath12k_dp_rx_pdev_free(struct ath12k_base *ab, int mac_id) +{ + struct ath12k *ar = ab->pdevs[mac_id].ar; + + ath12k_dp_rx_pdev_srng_free(ar); +} + +int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab) +{ + struct ath12k_dp *dp = &ab->dp; + struct htt_rx_ring_tlv_filter tlv_filter = {0}; + u32 ring_id; + int ret; + u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz; + + ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id; + + tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING; + tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR; + tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST | + HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST | + HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA; + tlv_filter.offset_valid = true; + tlv_filter.rx_packet_offset = hal_rx_desc_sz; + + tlv_filter.rx_mpdu_start_offset = + ab->hw_params->hal_ops->rx_desc_get_mpdu_start_offset(); + tlv_filter.rx_msdu_end_offset = + ab->hw_params->hal_ops->rx_desc_get_msdu_end_offset(); + + /* TODO: Selectively subscribe to required qwords within msdu_end + * and mpdu_start and setup the mask in below msg 
+ * and modify the rx_desc struct + */ + ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, 0, + HAL_RXDMA_BUF, + DP_RXDMA_REFILL_RING_SIZE, + &tlv_filter); + + return ret; +} + +int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab) +{ + struct ath12k_dp *dp = &ab->dp; + struct htt_rx_ring_tlv_filter tlv_filter = {0}; + u32 ring_id; + int ret; + u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz; + int i; + + ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id; + + tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING; + tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR; + tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST | + HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST | + HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA; + tlv_filter.offset_valid = true; + tlv_filter.rx_packet_offset = hal_rx_desc_sz; + + tlv_filter.rx_header_offset = offsetof(struct hal_rx_desc_wcn7850, pkt_hdr_tlv); + + tlv_filter.rx_mpdu_start_offset = + ab->hw_params->hal_ops->rx_desc_get_mpdu_start_offset(); + tlv_filter.rx_msdu_end_offset = + ab->hw_params->hal_ops->rx_desc_get_msdu_end_offset(); + + /* TODO: Selectively subscribe to required qwords within msdu_end + * and mpdu_start and setup the mask in below msg + * and modify the rx_desc struct + */ + + for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) { + ring_id = dp->rx_mac_buf_ring[i].ring_id; + ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, i, + HAL_RXDMA_BUF, + DP_RXDMA_REFILL_RING_SIZE, + &tlv_filter); + } + + return ret; +} + +int ath12k_dp_rx_htt_setup(struct ath12k_base *ab) +{ + struct ath12k_dp *dp = &ab->dp; + u32 ring_id; + int i, ret; + + /* TODO: Need to verify the HTT setup for QCN9224 */ + ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id; + ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 0, HAL_RXDMA_BUF); + if (ret) { + ath12k_warn(ab, "failed to configure rx_refill_buf_ring %d\n", + ret); + return ret; + } + + if (ab->hw_params->rx_mac_buf_ring) { + for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) { + ring_id = dp->rx_mac_buf_ring[i].ring_id; + ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, + i, HAL_RXDMA_BUF); + if (ret) { + ath12k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n", + i, ret); + return ret; + } + } + } + + for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) { + ring_id = dp->rxdma_err_dst_ring[i].ring_id; + ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, + i, HAL_RXDMA_DST); + if (ret) { + ath12k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n", + i, ret); + return ret; + } + } + + if (ab->hw_params->rxdma1_enable) { + ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id; + ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, + 0, HAL_RXDMA_MONITOR_BUF); + if (ret) { + ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n", + ret); + return ret; + } + + ring_id = dp->tx_mon_buf_ring.refill_buf_ring.ring_id; + ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, + 0, HAL_TX_MONITOR_BUF); + if (ret) { + ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n", + ret); + return ret; + } + } + + ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab); + if (ret) { + ath12k_warn(ab, "failed to setup rxdma ring selection config\n"); + return ret; + } + + return 0; +} + +int ath12k_dp_rx_alloc(struct ath12k_base *ab) +{ + struct ath12k_dp *dp = &ab->dp; + int i, ret; + + idr_init(&dp->rx_refill_buf_ring.bufs_idr); + spin_lock_init(&dp->rx_refill_buf_ring.idr_lock); + + idr_init(&dp->rxdma_mon_buf_ring.bufs_idr); + 
spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock); + + idr_init(&dp->tx_mon_buf_ring.bufs_idr); + spin_lock_init(&dp->tx_mon_buf_ring.idr_lock); + + ret = ath12k_dp_srng_setup(ab, + &dp->rx_refill_buf_ring.refill_buf_ring, + HAL_RXDMA_BUF, 0, 0, + DP_RXDMA_BUF_RING_SIZE); + if (ret) { + ath12k_warn(ab, "failed to setup rx_refill_buf_ring\n"); + return ret; + } + + if (ab->hw_params->rx_mac_buf_ring) { + for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) { + ret = ath12k_dp_srng_setup(ab, + &dp->rx_mac_buf_ring[i], + HAL_RXDMA_BUF, 1, + i, 1024); + if (ret) { + ath12k_warn(ab, "failed to setup rx_mac_buf_ring %d\n", + i); + return ret; + } + } + } + + for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) { + ret = ath12k_dp_srng_setup(ab, &dp->rxdma_err_dst_ring[i], + HAL_RXDMA_DST, 0, i, + DP_RXDMA_ERR_DST_RING_SIZE); + if (ret) { + ath12k_warn(ab, "failed to setup rxdma_err_dst_ring %d\n", i); + return ret; + } + } + + if (ab->hw_params->rxdma1_enable) { + ret = ath12k_dp_srng_setup(ab, + &dp->rxdma_mon_buf_ring.refill_buf_ring, + HAL_RXDMA_MONITOR_BUF, 0, 0, + DP_RXDMA_MONITOR_BUF_RING_SIZE); + if (ret) { + ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n"); + return ret; + } + + ret = ath12k_dp_srng_setup(ab, + &dp->tx_mon_buf_ring.refill_buf_ring, + HAL_TX_MONITOR_BUF, 0, 0, + DP_TX_MONITOR_BUF_RING_SIZE); + if (ret) { + ath12k_warn(ab, "failed to setup DP_TX_MONITOR_BUF_RING_SIZE\n"); + return ret; + } + } + + ret = ath12k_dp_rxdma_buf_setup(ab); + if (ret) { + ath12k_warn(ab, "failed to setup rxdma ring\n"); + return ret; + } + + return 0; +} + +int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id) +{ + struct ath12k *ar = ab->pdevs[mac_id].ar; + struct ath12k_pdev_dp *dp = &ar->dp; + u32 ring_id; + int i; + int ret; + + if (!ab->hw_params->rxdma1_enable) + goto out; + + ret = ath12k_dp_rx_pdev_srng_alloc(ar); + if (ret) { + ath12k_warn(ab, "failed to setup rx srngs\n"); + return ret; + } + + for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) { + ring_id = dp->rxdma_mon_dst_ring[i].ring_id; + ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, + mac_id + i, + HAL_RXDMA_MONITOR_DST); + if (ret) { + ath12k_warn(ab, + "failed to configure rxdma_mon_dst_ring %d %d\n", + i, ret); + return ret; + } + + ring_id = dp->tx_mon_dst_ring[i].ring_id; + ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, + mac_id + i, + HAL_TX_MONITOR_DST); + if (ret) { + ath12k_warn(ab, + "failed to configure tx_mon_dst_ring %d %d\n", + i, ret); + return ret; + } + } +out: + return 0; +} + +static int ath12k_dp_rx_pdev_mon_status_attach(struct ath12k *ar) +{ + struct ath12k_pdev_dp *dp = &ar->dp; + struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&dp->mon_data; + + skb_queue_head_init(&pmon->rx_status_q); + + pmon->mon_ppdu_status = DP_PPDU_STATUS_START; + + memset(&pmon->rx_mon_stats, 0, + sizeof(pmon->rx_mon_stats)); + return 0; +} + +int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar) +{ + struct ath12k_pdev_dp *dp = &ar->dp; + struct ath12k_mon_data *pmon = &dp->mon_data; + int ret = 0; + + ret = ath12k_dp_rx_pdev_mon_status_attach(ar); + if (ret) { + ath12k_warn(ar->ab, "pdev_mon_status_attach() failed"); + return ret; + } + + /* if rxdma1_enable is false, no need to setup + * rxdma_mon_desc_ring. 
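 + * The per-pdev monitor srng setup in ath12k_dp_rx_pdev_alloc() above is + * likewise skipped for such targets. 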
+ */ + if (!ar->ab->hw_params->rxdma1_enable) + return 0; + + pmon->mon_last_linkdesc_paddr = 0; + pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1; + spin_lock_init(&pmon->mon_lock); + + return 0; +} + +int ath12k_dp_rx_pktlog_start(struct ath12k_base *ab) +{ + /* start reap timer */ + mod_timer(&ab->mon_reap_timer, + jiffies + msecs_to_jiffies(ATH12K_MON_TIMER_INTERVAL)); + + return 0; +} + +int ath12k_dp_rx_pktlog_stop(struct ath12k_base *ab, bool stop_timer) +{ + int ret; + + if (stop_timer) + del_timer_sync(&ab->mon_reap_timer); + + /* reap all the monitor related rings */ + ret = ath12k_dp_purge_mon_ring(ab); + if (ret) { + ath12k_warn(ab, "failed to purge dp mon ring: %d\n", ret); + return ret; + } + + return 0; +} diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.h b/drivers/net/wireless/ath/ath12k/dp_rx.h new file mode 100644 index 000000000000..c955b5c859d1 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/dp_rx.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ +#ifndef ATH12K_DP_RX_H +#define ATH12K_DP_RX_H + +#include "core.h" +#include "rx_desc.h" +#include "debug.h" + +#define DP_MAX_NWIFI_HDR_LEN 30 + +struct ath12k_dp_rx_tid { + u8 tid; + u32 *vaddr; + dma_addr_t paddr; + u32 size; + u32 ba_win_sz; + bool active; + + /* Info related to rx fragments */ + u32 cur_sn; + u16 last_frag_no; + u16 rx_frag_bitmap; + + struct sk_buff_head rx_frags; + struct hal_reo_dest_ring *dst_ring_desc; + + /* Timer info related to fragments */ + struct timer_list frag_timer; + struct ath12k_base *ab; +}; + +struct ath12k_dp_rx_reo_cache_flush_elem { + struct list_head list; + struct ath12k_dp_rx_tid data; + unsigned long ts; +}; + +struct ath12k_dp_rx_reo_cmd { + struct list_head list; + struct ath12k_dp_rx_tid data; + int cmd_num; + void (*handler)(struct ath12k_dp *dp, void *ctx, + enum hal_reo_cmd_status status); +}; + +#define ATH12K_DP_RX_REO_DESC_FREE_THRES 64 +#define ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS 1000 + +enum ath12k_dp_rx_decap_type { + DP_RX_DECAP_TYPE_RAW, + DP_RX_DECAP_TYPE_NATIVE_WIFI, + DP_RX_DECAP_TYPE_ETHERNET2_DIX, + DP_RX_DECAP_TYPE_8023, +}; + +struct ath12k_dp_rx_rfc1042_hdr { + u8 llc_dsap; + u8 llc_ssap; + u8 llc_ctrl; + u8 snap_oui[3]; + __be16 snap_type; +} __packed; + +static inline u32 ath12k_he_gi_to_nl80211_he_gi(u8 sgi) +{ + u32 ret = 0; + + switch (sgi) { + case RX_MSDU_START_SGI_0_8_US: + ret = NL80211_RATE_INFO_HE_GI_0_8; + break; + case RX_MSDU_START_SGI_1_6_US: + ret = NL80211_RATE_INFO_HE_GI_1_6; + break; + case RX_MSDU_START_SGI_3_2_US: + ret = NL80211_RATE_INFO_HE_GI_3_2; + break; + } + + return ret; +} + +int ath12k_dp_rx_ampdu_start(struct ath12k *ar, + struct ieee80211_ampdu_params *params); +int ath12k_dp_rx_ampdu_stop(struct ath12k *ar, + struct ieee80211_ampdu_params *params); +int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_vif *arvif, + const u8 *peer_addr, + enum set_key_cmd key_cmd, + struct ieee80211_key_conf *key); +void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_peer *peer); +void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar, + struct ath12k_peer *peer, u8 tid); +int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id, + u8 tid, u32 ba_win_sz, u16 ssn, + enum hal_pn_type pn_type); +void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab, + struct sk_buff *skb); +int 
ath12k_dp_rx_pdev_reo_setup(struct ath12k_base *ab); +void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab); +int ath12k_dp_rx_htt_setup(struct ath12k_base *ab); +int ath12k_dp_rx_alloc(struct ath12k_base *ab); +void ath12k_dp_rx_free(struct ath12k_base *ab); +int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int pdev_idx); +void ath12k_dp_rx_pdev_free(struct ath12k_base *ab, int pdev_idx); +void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab); +void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab); +int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab, + struct napi_struct *napi, int budget); +int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, + int budget); +int ath12k_dp_rx_process(struct ath12k_base *ab, int mac_id, + struct napi_struct *napi, + int budget); +int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, int mac_id, + struct dp_rxdma_ring *rx_ring, + int req_entries, + enum hal_rx_buf_return_buf_manager mgr, + bool hw_cc); +int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar); +int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id); + +int ath12k_dp_rx_pktlog_start(struct ath12k_base *ab); +int ath12k_dp_rx_pktlog_stop(struct ath12k_base *ab, bool stop_timer); +u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab, + struct hal_rx_desc *desc); +struct ath12k_peer * +ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu); +u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab, + struct hal_rx_desc *desc); +u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab, + struct hal_rx_desc *desc); +void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc, + struct ieee80211_rx_status *rx_status); +struct ath12k_peer * +ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu); + +int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab); +int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab); + +#endif /* ATH12K_DP_RX_H */ diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c new file mode 100644 index 000000000000..95294f35155c --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/dp_tx.c @@ -0,0 +1,1211 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include "core.h" +#include "dp_tx.h" +#include "debug.h" +#include "hw.h" + +static enum hal_tcl_encap_type +ath12k_dp_tx_get_encap_type(struct ath12k_vif *arvif, struct sk_buff *skb) +{ + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); + + if (tx_info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) + return HAL_TCL_ENCAP_TYPE_ETHERNET; + + return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI; +} + +static void ath12k_dp_tx_encap_nwifi(struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (void *)skb->data; + u8 *qos_ctl; + + if (!ieee80211_is_data_qos(hdr->frame_control)) + return; + + qos_ctl = ieee80211_get_qos_ctl(hdr); + memmove(skb->data + IEEE80211_QOS_CTL_LEN, + skb->data, (void *)qos_ctl - (void *)skb->data); + skb_pull(skb, IEEE80211_QOS_CTL_LEN); + + hdr = (void *)skb->data; + hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA); +} + +static u8 ath12k_dp_tx_get_tid(struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (void *)skb->data; + struct ath12k_skb_cb *cb = ATH12K_SKB_CB(skb); + + if (cb->flags & ATH12K_SKB_HW_80211_ENCAP) + return skb->priority & IEEE80211_QOS_CTL_TID_MASK; + else if (!ieee80211_is_data_qos(hdr->frame_control)) + return HAL_DESC_REO_NON_QOS_TID; + else + return skb->priority & IEEE80211_QOS_CTL_TID_MASK; +} + +enum hal_encrypt_type ath12k_dp_tx_get_encrypt_type(u32 cipher) +{ + switch (cipher) { + case WLAN_CIPHER_SUITE_WEP40: + return HAL_ENCRYPT_TYPE_WEP_40; + case WLAN_CIPHER_SUITE_WEP104: + return HAL_ENCRYPT_TYPE_WEP_104; + case WLAN_CIPHER_SUITE_TKIP: + return HAL_ENCRYPT_TYPE_TKIP_MIC; + case WLAN_CIPHER_SUITE_CCMP: + return HAL_ENCRYPT_TYPE_CCMP_128; + case WLAN_CIPHER_SUITE_CCMP_256: + return HAL_ENCRYPT_TYPE_CCMP_256; + case WLAN_CIPHER_SUITE_GCMP: + return HAL_ENCRYPT_TYPE_GCMP_128; + case WLAN_CIPHER_SUITE_GCMP_256: + return HAL_ENCRYPT_TYPE_AES_GCMP_256; + default: + return HAL_ENCRYPT_TYPE_OPEN; + } +} + +static void ath12k_dp_tx_release_txbuf(struct ath12k_dp *dp, + struct ath12k_tx_desc_info *tx_desc, + u8 pool_id) +{ + spin_lock_bh(&dp->tx_desc_lock[pool_id]); + list_move_tail(&tx_desc->list, &dp->tx_desc_free_list[pool_id]); + spin_unlock_bh(&dp->tx_desc_lock[pool_id]); +} + +static struct ath12k_tx_desc_info *ath12k_dp_tx_assign_buffer(struct ath12k_dp *dp, + u8 pool_id) +{ + struct ath12k_tx_desc_info *desc; + + spin_lock_bh(&dp->tx_desc_lock[pool_id]); + desc = list_first_entry_or_null(&dp->tx_desc_free_list[pool_id], + struct ath12k_tx_desc_info, + list); + if (!desc) { + spin_unlock_bh(&dp->tx_desc_lock[pool_id]); + ath12k_warn(dp->ab, "failed to allocate data Tx buffer\n"); + return NULL; + } + + list_move_tail(&desc->list, &dp->tx_desc_used_list[pool_id]); + spin_unlock_bh(&dp->tx_desc_lock[pool_id]); + + return desc; +} + +static void ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab, void *cmd, + struct hal_tx_info *ti) +{ + struct hal_tx_msdu_ext_desc *tcl_ext_cmd = (struct hal_tx_msdu_ext_desc *)cmd; + + tcl_ext_cmd->info0 = le32_encode_bits(ti->paddr, + HAL_TX_MSDU_EXT_INFO0_BUF_PTR_LO); + tcl_ext_cmd->info1 = le32_encode_bits(0x0, + HAL_TX_MSDU_EXT_INFO1_BUF_PTR_HI) | + le32_encode_bits(ti->data_len, + HAL_TX_MSDU_EXT_INFO1_BUF_LEN); + + tcl_ext_cmd->info1 = le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) | + le32_encode_bits(ti->encap_type, + HAL_TX_MSDU_EXT_INFO1_ENCAP_TYPE) | + le32_encode_bits(ti->encrypt_type, + HAL_TX_MSDU_EXT_INFO1_ENCRYPT_TYPE); +} + +int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif, + struct sk_buff *skb) +{ + struct ath12k_base *ab = ar->ab; + struct ath12k_dp 
*dp = &ab->dp; + struct hal_tx_info ti = {0}; + struct ath12k_tx_desc_info *tx_desc; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb); + struct hal_tcl_data_cmd *hal_tcl_desc; + struct hal_tx_msdu_ext_desc *msg; + struct sk_buff *skb_ext_desc; + struct hal_srng *tcl_ring; + struct ieee80211_hdr *hdr = (void *)skb->data; + struct dp_tx_ring *tx_ring; + u8 pool_id; + u8 hal_ring_id; + int ret; + u8 ring_selector, ring_map = 0; + bool tcl_ring_retry; + bool msdu_ext_desc = false; + + if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)) + return -ESHUTDOWN; + + if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) && + !ieee80211_is_data(hdr->frame_control)) + return -ENOTSUPP; + + pool_id = skb_get_queue_mapping(skb) & (ATH12K_HW_MAX_QUEUES - 1); + + /* Let the default ring selection be based on current processor + * number, where one of the 3 tcl rings is selected based on + * the smp_processor_id(). In case that ring + * is full/busy, we resort to other available rings. + * If all rings are full, we drop the packet. + * TODO: Add throttling logic when all rings are full + */ + ring_selector = ab->hw_params->hw_ops->get_ring_selector(skb); + +tcl_ring_sel: + tcl_ring_retry = false; + ti.ring_id = ring_selector % ab->hw_params->max_tx_ring; + + ring_map |= BIT(ti.ring_id); + ti.rbm_id = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[ti.ring_id].rbm_id; + + tx_ring = &dp->tx_ring[ti.ring_id]; + + tx_desc = ath12k_dp_tx_assign_buffer(dp, pool_id); + if (!tx_desc) + return -ENOMEM; + + ti.bank_id = arvif->bank_id; + ti.meta_data_flags = arvif->tcl_metadata; + + if (arvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW && + test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) { + if (skb_cb->flags & ATH12K_SKB_CIPHER_SET) { + ti.encrypt_type = + ath12k_dp_tx_get_encrypt_type(skb_cb->cipher); + + if (ieee80211_has_protected(hdr->frame_control)) + skb_put(skb, IEEE80211_CCMP_MIC_LEN); + } else { + ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN; + } + + msdu_ext_desc = true; + } + + ti.encap_type = ath12k_dp_tx_get_encap_type(arvif, skb); + ti.addr_search_flags = arvif->hal_addr_search_flags; + ti.search_type = arvif->search_type; + ti.type = HAL_TCL_DESC_TYPE_BUFFER; + ti.pkt_offset = 0; + ti.lmac_id = ar->lmac_id; + ti.vdev_id = arvif->vdev_id; + ti.bss_ast_hash = arvif->ast_hash; + ti.bss_ast_idx = arvif->ast_idx; + ti.dscp_tid_tbl_idx = 0; + + if (skb->ip_summed == CHECKSUM_PARTIAL && + ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW) { + ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_IP4_CKSUM_EN) | + u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_UDP4_CKSUM_EN) | + u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_UDP6_CKSUM_EN) | + u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TCP4_CKSUM_EN) | + u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TCP6_CKSUM_EN); + } + + ti.flags1 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO3_TID_OVERWRITE); + + ti.tid = ath12k_dp_tx_get_tid(skb); + + switch (ti.encap_type) { + case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI: + ath12k_dp_tx_encap_nwifi(skb); + break; + case HAL_TCL_ENCAP_TYPE_RAW: + if (!test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) { + ret = -EINVAL; + goto fail_remove_tx_buf; + } + break; + case HAL_TCL_ENCAP_TYPE_ETHERNET: + /* no need to encap */ + break; + case HAL_TCL_ENCAP_TYPE_802_3: + default: + /* TODO: Take care of other encap modes as well */ + ret = -EINVAL; + atomic_inc(&ab->soc_stats.tx_err.misc_fail); + goto fail_remove_tx_buf; + } + + ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE); + 
if (dma_mapping_error(ab->dev, ti.paddr)) { + atomic_inc(&ab->soc_stats.tx_err.misc_fail); + ath12k_warn(ab, "failed to DMA map data Tx buffer\n"); + ret = -ENOMEM; + goto fail_remove_tx_buf; + } + + tx_desc->skb = skb; + tx_desc->mac_id = ar->pdev_idx; + ti.desc_id = tx_desc->desc_id; + ti.data_len = skb->len; + skb_cb->paddr = ti.paddr; + skb_cb->vif = arvif->vif; + skb_cb->ar = ar; + + if (msdu_ext_desc) { + skb_ext_desc = dev_alloc_skb(sizeof(struct hal_tx_msdu_ext_desc)); + if (!skb_ext_desc) { + ret = -ENOMEM; + goto fail_unmap_dma; + } + + skb_put(skb_ext_desc, sizeof(struct hal_tx_msdu_ext_desc)); + memset(skb_ext_desc->data, 0, skb_ext_desc->len); + + msg = (struct hal_tx_msdu_ext_desc *)skb_ext_desc->data; + ath12k_hal_tx_cmd_ext_desc_setup(ab, msg, &ti); + + ti.paddr = dma_map_single(ab->dev, skb_ext_desc->data, + skb_ext_desc->len, DMA_TO_DEVICE); + ret = dma_mapping_error(ab->dev, ti.paddr); + if (ret) { + kfree(skb_ext_desc); + goto fail_unmap_dma; + } + + ti.data_len = skb_ext_desc->len; + ti.type = HAL_TCL_DESC_TYPE_EXT_DESC; + + skb_cb->paddr_ext_desc = ti.paddr; + } + + hal_ring_id = tx_ring->tcl_data_ring.ring_id; + tcl_ring = &ab->hal.srng_list[hal_ring_id]; + + spin_lock_bh(&tcl_ring->lock); + + ath12k_hal_srng_access_begin(ab, tcl_ring); + + hal_tcl_desc = ath12k_hal_srng_src_get_next_entry(ab, tcl_ring); + if (!hal_tcl_desc) { + /* NOTE: It is highly unlikely we'll be running out of tcl_ring + * desc because the desc is directly enqueued onto hw queue. + */ + ath12k_hal_srng_access_end(ab, tcl_ring); + ab->soc_stats.tx_err.desc_na[ti.ring_id]++; + spin_unlock_bh(&tcl_ring->lock); + ret = -ENOMEM; + + /* Checking for available tcl descriptors in another ring in + * case of failure due to full tcl ring now, is better than + * checking this ring earlier for each pkt tx. + * Restart ring selection if some rings are not checked yet. 
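 + * ring_map records every tcl ring already tried for this skb, so the + * retry below stops once all of them have been attempted. 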
+ */ + if (ring_map != (BIT(ab->hw_params->max_tx_ring) - 1) && + ab->hw_params->tcl_ring_retry) { + tcl_ring_retry = true; + ring_selector++; + } + + goto fail_unmap_dma; + } + + ath12k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc, &ti); + + ath12k_hal_srng_access_end(ab, tcl_ring); + + spin_unlock_bh(&tcl_ring->lock); + + ath12k_dbg_dump(ab, ATH12K_DBG_DP_TX, NULL, "dp tx msdu: ", + skb->data, skb->len); + + atomic_inc(&ar->dp.num_tx_pending); + + return 0; + +fail_unmap_dma: + dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE); + dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc, + sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE); + +fail_remove_tx_buf: + ath12k_dp_tx_release_txbuf(dp, tx_desc, pool_id); + if (tcl_ring_retry) + goto tcl_ring_sel; + + return ret; +} + +static void ath12k_dp_tx_free_txbuf(struct ath12k_base *ab, + struct sk_buff *msdu, u8 mac_id, + struct dp_tx_ring *tx_ring) +{ + struct ath12k *ar; + struct ath12k_skb_cb *skb_cb; + + skb_cb = ATH12K_SKB_CB(msdu); + + dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); + if (skb_cb->paddr_ext_desc) + dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc, + sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE); + + dev_kfree_skb_any(msdu); + + ar = ab->pdevs[mac_id].ar; + if (atomic_dec_and_test(&ar->dp.num_tx_pending)) + wake_up(&ar->dp.tx_empty_waitq); +} + +static void +ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab, + struct sk_buff *msdu, + struct dp_tx_ring *tx_ring, + struct ath12k_dp_htt_wbm_tx_status *ts) +{ + struct ieee80211_tx_info *info; + struct ath12k_skb_cb *skb_cb; + struct ath12k *ar; + + skb_cb = ATH12K_SKB_CB(msdu); + info = IEEE80211_SKB_CB(msdu); + + ar = skb_cb->ar; + + if (atomic_dec_and_test(&ar->dp.num_tx_pending)) + wake_up(&ar->dp.tx_empty_waitq); + + dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); + if (skb_cb->paddr_ext_desc) + dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc, + sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE); + + memset(&info->status, 0, sizeof(info->status)); + + if (ts->acked) { + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { + info->flags |= IEEE80211_TX_STAT_ACK; + info->status.ack_signal = ATH12K_DEFAULT_NOISE_FLOOR + + ts->ack_rssi; + info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID; + } else { + info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; + } + } + + ieee80211_tx_status(ar->hw, msdu); +} + +static void +ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab, + void *desc, u8 mac_id, + struct sk_buff *msdu, + struct dp_tx_ring *tx_ring) +{ + struct htt_tx_wbm_completion *status_desc; + struct ath12k_dp_htt_wbm_tx_status ts = {0}; + enum hal_wbm_htt_tx_comp_status wbm_status; + + status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET; + + wbm_status = le32_get_bits(status_desc->info0, + HTT_TX_WBM_COMP_INFO0_STATUS); + + switch (wbm_status) { + case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK: + case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP: + case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL: + ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK); + ts.ack_rssi = le32_get_bits(status_desc->info2, + HTT_TX_WBM_COMP_INFO2_ACK_RSSI); + ath12k_dp_tx_htt_tx_complete_buf(ab, msdu, tx_ring, &ts); + break; + case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ: + case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT: + ath12k_dp_tx_free_txbuf(ab, msdu, mac_id, tx_ring); + break; + case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY: + /* This event is to be handled only when the driver decides to + * use WDS offload functionality. 
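 + * Until such support is added, the notification is silently consumed + * here instead of being flagged as an unknown status. 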
+ */ + break; + default: + ath12k_warn(ab, "Unknown htt tx status %d\n", wbm_status); + break; + } +} + +static void ath12k_dp_tx_complete_msdu(struct ath12k *ar, + struct sk_buff *msdu, + struct hal_tx_status *ts) +{ + struct ath12k_base *ab = ar->ab; + struct ieee80211_tx_info *info; + struct ath12k_skb_cb *skb_cb; + + if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) { + /* Must not happen */ + return; + } + + skb_cb = ATH12K_SKB_CB(msdu); + + dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); + if (skb_cb->paddr_ext_desc) + dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc, + sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE); + + rcu_read_lock(); + + if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) { + dev_kfree_skb_any(msdu); + goto exit; + } + + if (!skb_cb->vif) { + dev_kfree_skb_any(msdu); + goto exit; + } + + info = IEEE80211_SKB_CB(msdu); + memset(&info->status, 0, sizeof(info->status)); + + /* skip tx rate update from ieee80211_status*/ + info->status.rates[0].idx = -1; + + if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED && + !(info->flags & IEEE80211_TX_CTL_NO_ACK)) { + info->flags |= IEEE80211_TX_STAT_ACK; + info->status.ack_signal = ATH12K_DEFAULT_NOISE_FLOOR + + ts->ack_rssi; + info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID; + } + + if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX && + (info->flags & IEEE80211_TX_CTL_NO_ACK)) + info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; + + /* NOTE: Tx rate status reporting. Tx completion status does not have + * necessary information (for example nss) to build the tx rate. + * Might end up reporting it out-of-band from HTT stats. + */ + + ieee80211_tx_status(ar->hw, msdu); + +exit: + rcu_read_unlock(); +} + +static void ath12k_dp_tx_status_parse(struct ath12k_base *ab, + struct hal_wbm_completion_ring_tx *desc, + struct hal_tx_status *ts) +{ + ts->buf_rel_source = + le32_get_bits(desc->info0, HAL_WBM_COMPL_TX_INFO0_REL_SRC_MODULE); + if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW && + ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM) + return; + + if (ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) + return; + + ts->status = le32_get_bits(desc->info0, + HAL_WBM_COMPL_TX_INFO0_TQM_RELEASE_REASON); + + ts->ppdu_id = le32_get_bits(desc->info1, + HAL_WBM_COMPL_TX_INFO1_TQM_STATUS_NUMBER); + if (le32_to_cpu(desc->rate_stats.info0) & HAL_TX_RATE_STATS_INFO0_VALID) + ts->rate_stats = le32_to_cpu(desc->rate_stats.info0); + else + ts->rate_stats = 0; +} + +void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id) +{ + struct ath12k *ar; + struct ath12k_dp *dp = &ab->dp; + int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id; + struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id]; + struct ath12k_tx_desc_info *tx_desc = NULL; + struct sk_buff *msdu; + struct hal_tx_status ts = { 0 }; + struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id]; + struct hal_wbm_release_ring *desc; + u8 mac_id; + u64 desc_va; + + spin_lock_bh(&status_ring->lock); + + ath12k_hal_srng_access_begin(ab, status_ring); + + while (ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_head) != tx_ring->tx_status_tail) { + desc = ath12k_hal_srng_dst_get_next_entry(ab, status_ring); + if (!desc) + break; + + memcpy(&tx_ring->tx_status[tx_ring->tx_status_head], + desc, sizeof(*desc)); + tx_ring->tx_status_head = + ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_head); + } + + if (ath12k_hal_srng_dst_peek(ab, status_ring) && + (ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_head) == 
tx_ring->tx_status_tail)) { + /* TODO: Process pending tx_status messages when kfifo_is_full() */ + ath12k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n"); + } + + ath12k_hal_srng_access_end(ab, status_ring); + + spin_unlock_bh(&status_ring->lock); + + while (ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) { + struct hal_wbm_completion_ring_tx *tx_status; + u32 desc_id; + + tx_ring->tx_status_tail = + ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_tail); + tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail]; + ath12k_dp_tx_status_parse(ab, tx_status, &ts); + + if (le32_get_bits(tx_status->info0, HAL_WBM_COMPL_TX_INFO0_CC_DONE)) { + /* HW done cookie conversion */ + desc_va = ((u64)le32_to_cpu(tx_status->buf_va_hi) << 32 | + le32_to_cpu(tx_status->buf_va_lo)); + tx_desc = (struct ath12k_tx_desc_info *)((unsigned long)desc_va); + } else { + /* SW does cookie conversion to VA */ + desc_id = le32_get_bits(tx_status->buf_va_hi, + BUFFER_ADDR_INFO1_SW_COOKIE); + + tx_desc = ath12k_dp_get_tx_desc(ab, desc_id); + } + if (!tx_desc) { + ath12k_warn(ab, "unable to retrieve tx_desc!"); + continue; + } + + msdu = tx_desc->skb; + mac_id = tx_desc->mac_id; + + /* Release descriptor as soon as extracting necessary info + * to reduce contention + */ + ath12k_dp_tx_release_txbuf(dp, tx_desc, tx_desc->pool_id); + if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) { + ath12k_dp_tx_process_htt_tx_complete(ab, + (void *)tx_status, + mac_id, msdu, + tx_ring); + continue; + } + + ar = ab->pdevs[mac_id].ar; + + if (atomic_dec_and_test(&ar->dp.num_tx_pending)) + wake_up(&ar->dp.tx_empty_waitq); + + ath12k_dp_tx_complete_msdu(ar, msdu, &ts); + } +} + +static int +ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab, + int mac_id, u32 ring_id, + enum hal_ring_type ring_type, + enum htt_srng_ring_type *htt_ring_type, + enum htt_srng_ring_id *htt_ring_id) +{ + int ret = 0; + + switch (ring_type) { + case HAL_RXDMA_BUF: + /* for some targets, host fills rx buffer to fw and fw fills to + * rxbuf ring for each rxdma + */ + if (!ab->hw_params->rx_mac_buf_ring) { + if (!(ring_id == HAL_SRNG_SW2RXDMA_BUF0 || + ring_id == HAL_SRNG_SW2RXDMA_BUF1)) { + ret = -EINVAL; + } + *htt_ring_id = HTT_RXDMA_HOST_BUF_RING; + *htt_ring_type = HTT_SW_TO_HW_RING; + } else { + if (ring_id == HAL_SRNG_SW2RXDMA_BUF0) { + *htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING; + *htt_ring_type = HTT_SW_TO_SW_RING; + } else { + *htt_ring_id = HTT_RXDMA_HOST_BUF_RING; + *htt_ring_type = HTT_SW_TO_HW_RING; + } + } + break; + case HAL_RXDMA_DST: + *htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING; + *htt_ring_type = HTT_HW_TO_SW_RING; + break; + case HAL_RXDMA_MONITOR_BUF: + *htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING; + *htt_ring_type = HTT_SW_TO_HW_RING; + break; + case HAL_RXDMA_MONITOR_STATUS: + *htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING; + *htt_ring_type = HTT_SW_TO_HW_RING; + break; + case HAL_RXDMA_MONITOR_DST: + *htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING; + *htt_ring_type = HTT_HW_TO_SW_RING; + break; + case HAL_RXDMA_MONITOR_DESC: + *htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING; + *htt_ring_type = HTT_SW_TO_HW_RING; + break; + case HAL_TX_MONITOR_BUF: + *htt_ring_id = HTT_TX_MON_HOST2MON_BUF_RING; + *htt_ring_type = HTT_SW_TO_HW_RING; + break; + case HAL_TX_MONITOR_DST: + *htt_ring_id = HTT_TX_MON_MON2HOST_DEST_RING; + *htt_ring_type = HTT_HW_TO_SW_RING; + break; + default: + ath12k_warn(ab, "Unsupported ring type in DP :%d\n", ring_type); + ret = -EINVAL; + } + return ret; +} + +int 
ath12k_dp_tx_htt_srng_setup(struct ath12k_base *ab, u32 ring_id, + int mac_id, enum hal_ring_type ring_type) +{ + struct htt_srng_setup_cmd *cmd; + struct hal_srng *srng = &ab->hal.srng_list[ring_id]; + struct hal_srng_params params; + struct sk_buff *skb; + u32 ring_entry_sz; + int len = sizeof(*cmd); + dma_addr_t hp_addr, tp_addr; + enum htt_srng_ring_type htt_ring_type; + enum htt_srng_ring_id htt_ring_id; + int ret; + + skb = ath12k_htc_alloc_skb(ab, len); + if (!skb) + return -ENOMEM; + + memset(&params, 0, sizeof(params)); + ath12k_hal_srng_get_params(ab, srng, &params); + + hp_addr = ath12k_hal_srng_get_hp_addr(ab, srng); + tp_addr = ath12k_hal_srng_get_tp_addr(ab, srng); + + ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id, + ring_type, &htt_ring_type, + &htt_ring_id); + if (ret) + goto err_free; + + skb_put(skb, len); + cmd = (struct htt_srng_setup_cmd *)skb->data; + cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_SRING_SETUP, + HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE); + if (htt_ring_type == HTT_SW_TO_HW_RING || + htt_ring_type == HTT_HW_TO_SW_RING) + cmd->info0 |= le32_encode_bits(DP_SW2HW_MACID(mac_id), + HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID); + else + cmd->info0 |= le32_encode_bits(mac_id, + HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID); + cmd->info0 |= le32_encode_bits(htt_ring_type, + HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE); + cmd->info0 |= le32_encode_bits(htt_ring_id, + HTT_SRNG_SETUP_CMD_INFO0_RING_ID); + + cmd->ring_base_addr_lo = cpu_to_le32(params.ring_base_paddr & + HAL_ADDR_LSB_REG_MASK); + + cmd->ring_base_addr_hi = cpu_to_le32((u64)params.ring_base_paddr >> + HAL_ADDR_MSB_REG_SHIFT); + + ret = ath12k_hal_srng_get_entrysize(ab, ring_type); + if (ret < 0) + goto err_free; + + ring_entry_sz = ret; + + ring_entry_sz >>= 2; + cmd->info1 = le32_encode_bits(ring_entry_sz, + HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE); + cmd->info1 |= le32_encode_bits(params.num_entries * ring_entry_sz, + HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE); + cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP), + HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP); + cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP), + HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP); + cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP), + HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP); + if (htt_ring_type == HTT_SW_TO_HW_RING) + cmd->info1 |= cpu_to_le32(HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS); + + cmd->ring_head_off32_remote_addr_lo = cpu_to_le32(lower_32_bits(hp_addr)); + cmd->ring_head_off32_remote_addr_hi = cpu_to_le32(upper_32_bits(hp_addr)); + + cmd->ring_tail_off32_remote_addr_lo = cpu_to_le32(lower_32_bits(tp_addr)); + cmd->ring_tail_off32_remote_addr_hi = cpu_to_le32(upper_32_bits(tp_addr)); + + cmd->ring_msi_addr_lo = cpu_to_le32(lower_32_bits(params.msi_addr)); + cmd->ring_msi_addr_hi = cpu_to_le32(upper_32_bits(params.msi_addr)); + cmd->msi_data = cpu_to_le32(params.msi_data); + + cmd->intr_info = + le32_encode_bits(params.intr_batch_cntr_thres_entries * ring_entry_sz, + HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH); + cmd->intr_info |= + le32_encode_bits(params.intr_timer_thres_us >> 3, + HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH); + + cmd->info2 = 0; + if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) { + cmd->info2 = le32_encode_bits(params.low_threshold, + HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH); + } + + ath12k_dbg(ab, ATH12K_DBG_HAL, + "%s msi_addr_lo:0x%x, msi_addr_hi:0x%x, msi_data:0x%x\n", + __func__, cmd->ring_msi_addr_lo, 
cmd->ring_msi_addr_hi, + cmd->msi_data); + + ath12k_dbg(ab, ATH12K_DBG_HAL, + "ring_id:%d, ring_type:%d, intr_info:0x%x, flags:0x%x\n", + ring_id, ring_type, cmd->intr_info, cmd->info2); + + ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb); + if (ret) + goto err_free; + + return 0; + +err_free: + dev_kfree_skb_any(skb); + + return ret; +} + +#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ) + +int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab) +{ + struct ath12k_dp *dp = &ab->dp; + struct sk_buff *skb; + struct htt_ver_req_cmd *cmd; + int len = sizeof(*cmd); + int ret; + + init_completion(&dp->htt_tgt_version_received); + + skb = ath12k_htc_alloc_skb(ab, len); + if (!skb) + return -ENOMEM; + + skb_put(skb, len); + cmd = (struct htt_ver_req_cmd *)skb->data; + cmd->ver_reg_info = le32_encode_bits(HTT_H2T_MSG_TYPE_VERSION_REQ, + HTT_VER_REQ_INFO_MSG_ID); + + ret = ath12k_htc_send(&ab->htc, dp->eid, skb); + if (ret) { + dev_kfree_skb_any(skb); + return ret; + } + + ret = wait_for_completion_timeout(&dp->htt_tgt_version_received, + HTT_TARGET_VERSION_TIMEOUT_HZ); + if (ret == 0) { + ath12k_warn(ab, "htt target version request timed out\n"); + return -ETIMEDOUT; + } + + if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) { + ath12k_err(ab, "unsupported htt major version %d supported version is %d\n", + dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR); + return -ENOTSUPP; + } + + return 0; +} + +int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask) +{ + struct ath12k_base *ab = ar->ab; + struct ath12k_dp *dp = &ab->dp; + struct sk_buff *skb; + struct htt_ppdu_stats_cfg_cmd *cmd; + int len = sizeof(*cmd); + u8 pdev_mask; + int ret; + int i; + + for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) { + skb = ath12k_htc_alloc_skb(ab, len); + if (!skb) + return -ENOMEM; + + skb_put(skb, len); + cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data; + cmd->msg = le32_encode_bits(HTT_H2T_MSG_TYPE_PPDU_STATS_CFG, + HTT_PPDU_STATS_CFG_MSG_TYPE); + + pdev_mask = 1 << (i + 1); + cmd->msg |= le32_encode_bits(pdev_mask, HTT_PPDU_STATS_CFG_PDEV_ID); + cmd->msg |= le32_encode_bits(mask, HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK); + + ret = ath12k_htc_send(&ab->htc, dp->eid, skb); + if (ret) { + dev_kfree_skb_any(skb); + return ret; + } + } + + return 0; +} + +int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id, + int mac_id, enum hal_ring_type ring_type, + int rx_buf_size, + struct htt_rx_ring_tlv_filter *tlv_filter) +{ + struct htt_rx_ring_selection_cfg_cmd *cmd; + struct hal_srng *srng = &ab->hal.srng_list[ring_id]; + struct hal_srng_params params; + struct sk_buff *skb; + int len = sizeof(*cmd); + enum htt_srng_ring_type htt_ring_type; + enum htt_srng_ring_id htt_ring_id; + int ret; + + skb = ath12k_htc_alloc_skb(ab, len); + if (!skb) + return -ENOMEM; + + memset(&params, 0, sizeof(params)); + ath12k_hal_srng_get_params(ab, srng, &params); + + ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id, + ring_type, &htt_ring_type, + &htt_ring_id); + if (ret) + goto err_free; + + skb_put(skb, len); + cmd = (struct htt_rx_ring_selection_cfg_cmd *)skb->data; + cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG, + HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE); + if (htt_ring_type == HTT_SW_TO_HW_RING || + htt_ring_type == HTT_HW_TO_SW_RING) + cmd->info0 |= + le32_encode_bits(DP_SW2HW_MACID(mac_id), + HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID); + else + cmd->info0 |= + le32_encode_bits(mac_id, + HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID); + cmd->info0 |= 
+int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id, + int mac_id, enum hal_ring_type ring_type, + int rx_buf_size, + struct htt_rx_ring_tlv_filter *tlv_filter) +{ + struct htt_rx_ring_selection_cfg_cmd *cmd; + struct hal_srng *srng = &ab->hal.srng_list[ring_id]; + struct hal_srng_params params; + struct sk_buff *skb; + int len = sizeof(*cmd); + enum htt_srng_ring_type htt_ring_type; + enum htt_srng_ring_id htt_ring_id; + int ret; + + skb = ath12k_htc_alloc_skb(ab, len); + if (!skb) + return -ENOMEM; + + memset(&params, 0, sizeof(params)); + ath12k_hal_srng_get_params(ab, srng, &params); + + ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id, + ring_type, &htt_ring_type, + &htt_ring_id); + if (ret) + goto err_free; + + skb_put(skb, len); + cmd = (struct htt_rx_ring_selection_cfg_cmd *)skb->data; + cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG, + HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE); + if (htt_ring_type == HTT_SW_TO_HW_RING || + htt_ring_type == HTT_HW_TO_SW_RING) + cmd->info0 |= + le32_encode_bits(DP_SW2HW_MACID(mac_id), + HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID); + else + cmd->info0 |= + le32_encode_bits(mac_id, + HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID); + cmd->info0 |= le32_encode_bits(htt_ring_id, + HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID); + cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP), + HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS); + cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP), + HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS); + cmd->info0 |= le32_encode_bits(tlv_filter->offset_valid, + HTT_RX_RING_SELECTION_CFG_CMD_OFFSET_VALID); + cmd->info1 = le32_encode_bits(rx_buf_size, + HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE); + cmd->pkt_type_en_flags0 = cpu_to_le32(tlv_filter->pkt_filter_flags0); + cmd->pkt_type_en_flags1 = cpu_to_le32(tlv_filter->pkt_filter_flags1); + cmd->pkt_type_en_flags2 = cpu_to_le32(tlv_filter->pkt_filter_flags2); + cmd->pkt_type_en_flags3 = cpu_to_le32(tlv_filter->pkt_filter_flags3); + cmd->rx_filter_tlv = cpu_to_le32(tlv_filter->rx_filter); + + if (tlv_filter->offset_valid) { + cmd->rx_packet_offset = + le32_encode_bits(tlv_filter->rx_packet_offset, + HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET); + + cmd->rx_packet_offset |= + le32_encode_bits(tlv_filter->rx_header_offset, + HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET); + + cmd->rx_mpdu_offset = + le32_encode_bits(tlv_filter->rx_mpdu_end_offset, + HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET); + + cmd->rx_mpdu_offset |= + le32_encode_bits(tlv_filter->rx_mpdu_start_offset, + HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET); + + cmd->rx_msdu_offset = + le32_encode_bits(tlv_filter->rx_msdu_end_offset, + HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET); + + cmd->rx_msdu_offset |= + le32_encode_bits(tlv_filter->rx_msdu_start_offset, + HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET); + + cmd->rx_attn_offset = + le32_encode_bits(tlv_filter->rx_attn_offset, + HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET); + } + + ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb); + if (ret) + goto err_free; + + return 0; + +err_free: + dev_kfree_skb_any(skb); + + return ret; +} + +int +ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type, + struct htt_ext_stats_cfg_params *cfg_params, + u64 cookie) +{ + struct ath12k_base *ab = ar->ab; + struct ath12k_dp *dp = &ab->dp; + struct sk_buff *skb; + struct htt_ext_stats_cfg_cmd *cmd; + int len = sizeof(*cmd); + int ret; + + skb = ath12k_htc_alloc_skb(ab, len); + if (!skb) + return -ENOMEM; + + skb_put(skb, len); + + cmd = (struct htt_ext_stats_cfg_cmd *)skb->data; + memset(cmd, 0, sizeof(*cmd)); + cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG; + + cmd->hdr.pdev_mask = 1 << ar->pdev->pdev_id; + + cmd->hdr.stats_type = type; + cmd->cfg_param0 = cpu_to_le32(cfg_params->cfg0); + cmd->cfg_param1 = cpu_to_le32(cfg_params->cfg1); + cmd->cfg_param2 = cpu_to_le32(cfg_params->cfg2); + cmd->cfg_param3 = cpu_to_le32(cfg_params->cfg3); + cmd->cookie_lsb = cpu_to_le32(lower_32_bits(cookie)); + cmd->cookie_msb = cpu_to_le32(upper_32_bits(cookie)); + + ret = ath12k_htc_send(&ab->htc, dp->eid, skb); + if (ret) { + ath12k_warn(ab, "failed to send htt type stats request: %d\n", + ret); + dev_kfree_skb_any(skb); + return ret; + } + + return 0; +} + +int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset) +{ + struct ath12k_base *ab = ar->ab; + int ret; + + ret = ath12k_dp_tx_htt_tx_monitor_mode_ring_config(ar, reset); + if (ret) { + ath12k_err(ab, "failed to setup tx monitor filter %d\n", ret); + return ret; + } + + ret = ath12k_dp_tx_htt_rx_monitor_mode_ring_config(ar, reset); + if (ret) { + ath12k_err(ab, "failed to setup rx monitor filter %d\n", ret); + return ret; + } + + return 0; +}
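+ +/* rx monitor configuration: with reset the filter below stays zeroed, which + * disables monitor delivery; otherwise the filter-pass (FP) and + * monitor-other (MO) mgmt, ctrl and data classes are enabled on the monitor + * buffer ring. + */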
+int +ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset) +{ + struct ath12k_base *ab = ar->ab; + struct ath12k_dp *dp = &ab->dp; + struct htt_rx_ring_tlv_filter tlv_filter = {0}; + int ret, ring_id; + + ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id; + tlv_filter.offset_valid = false; + + if (!reset) { + tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING; + tlv_filter.pkt_filter_flags0 = + HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 | + HTT_RX_MON_MO_MGMT_FILTER_FLAGS0; + tlv_filter.pkt_filter_flags1 = + HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 | + HTT_RX_MON_MO_MGMT_FILTER_FLAGS1; + tlv_filter.pkt_filter_flags2 = + HTT_RX_MON_FP_CTRL_FILTER_FLASG2 | + HTT_RX_MON_MO_CTRL_FILTER_FLASG2; + tlv_filter.pkt_filter_flags3 = + HTT_RX_MON_FP_CTRL_FILTER_FLASG3 | + HTT_RX_MON_MO_CTRL_FILTER_FLASG3 | + HTT_RX_MON_FP_DATA_FILTER_FLASG3 | + HTT_RX_MON_MO_DATA_FILTER_FLASG3; + } + + if (ab->hw_params->rxdma1_enable) { + ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, 0, + HAL_RXDMA_MONITOR_BUF, + DP_RXDMA_REFILL_RING_SIZE, + &tlv_filter); + if (ret) { + ath12k_err(ab, + "failed to setup filter for monitor buf %d\n", ret); + return ret; + } + } + + return 0; +}
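+ +/* HTT_H2T_MSG_TYPE_TX_MONITOR_CFG: programs the tx monitor destination ring + * with its buffer size, the frame-control types to capture (mgmt/data/ctrl, + * each with a configurable capture DMA length) and the downstream/upstream + * TLV masks taken from struct htt_tx_ring_tlv_filter. + */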
+int ath12k_dp_tx_htt_tx_filter_setup(struct ath12k_base *ab, u32 ring_id, + int mac_id, enum hal_ring_type ring_type, + int tx_buf_size, + struct htt_tx_ring_tlv_filter *htt_tlv_filter) +{ + struct htt_tx_ring_selection_cfg_cmd *cmd; + struct hal_srng *srng = &ab->hal.srng_list[ring_id]; + struct hal_srng_params params; + struct sk_buff *skb; + int len = sizeof(*cmd); + enum htt_srng_ring_type htt_ring_type; + enum htt_srng_ring_id htt_ring_id; + int ret; + + skb = ath12k_htc_alloc_skb(ab, len); + if (!skb) + return -ENOMEM; + + memset(&params, 0, sizeof(params)); + ath12k_hal_srng_get_params(ab, srng, &params); + + ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id, + ring_type, &htt_ring_type, + &htt_ring_id); + + if (ret) + goto err_free; + + skb_put(skb, len); + cmd = (struct htt_tx_ring_selection_cfg_cmd *)skb->data; + cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_TX_MONITOR_CFG, + HTT_TX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE); + if (htt_ring_type == HTT_SW_TO_HW_RING || + htt_ring_type == HTT_HW_TO_SW_RING) + cmd->info0 |= + le32_encode_bits(DP_SW2HW_MACID(mac_id), + HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID); + else + cmd->info0 |= + le32_encode_bits(mac_id, + HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID); + cmd->info0 |= le32_encode_bits(htt_ring_id, + HTT_TX_RING_SELECTION_CFG_CMD_INFO0_RING_ID); + cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP), + HTT_TX_RING_SELECTION_CFG_CMD_INFO0_SS); + cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP), + HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PS); + + cmd->info1 |= + le32_encode_bits(tx_buf_size, + HTT_TX_RING_SELECTION_CFG_CMD_INFO1_RING_BUFF_SIZE); + + if (htt_tlv_filter->tx_mon_mgmt_filter) { + cmd->info1 |= + le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_MGMT, + HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE); + cmd->info1 |= + le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len, + HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT); + cmd->info2 |= + le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_MGMT, + HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG); + } + + if (htt_tlv_filter->tx_mon_data_filter) { + cmd->info1 |= + le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_DATA, + HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE); + cmd->info1 |= + le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len, + HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA); + cmd->info2 |= + le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_DATA, + HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG); + } + + if (htt_tlv_filter->tx_mon_ctrl_filter) { + cmd->info1 |= + le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_CTRL, + HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE); + cmd->info1 |= + le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len, + HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL); + cmd->info2 |= + le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_CTRL, + HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG); + } + + cmd->tlv_filter_mask_in0 = + cpu_to_le32(htt_tlv_filter->tx_mon_downstream_tlv_flags); + cmd->tlv_filter_mask_in1 = + cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags0); + cmd->tlv_filter_mask_in2 = + cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags1); + cmd->tlv_filter_mask_in3 = + cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags2); + + ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb); + if (ret) + goto err_free; + + return 0; + +err_free: + dev_kfree_skb_any(skb); + return ret; +}
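+ +/* tx counterpart of the rx monitor setup above; the upstream/downstream TLV + * masks are still TODO, so an all-zero filter is sent for the tx monitor + * buffer ring for now. + */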
+int ath12k_dp_tx_htt_tx_monitor_mode_ring_config(struct ath12k *ar, bool reset) +{ + struct ath12k_base *ab = ar->ab; + struct ath12k_dp *dp = &ab->dp; + struct htt_tx_ring_tlv_filter tlv_filter = {0}; + int ret, ring_id; + + ring_id = dp->tx_mon_buf_ring.refill_buf_ring.ring_id; + + /* TODO: Need to set upstream/downstream tlv filters + * here + */ + + if (ab->hw_params->rxdma1_enable) { + ret = ath12k_dp_tx_htt_tx_filter_setup(ar->ab, ring_id, 0, + HAL_TX_MONITOR_BUF, + DP_RXDMA_REFILL_RING_SIZE, + &tlv_filter); + if (ret) { + ath12k_err(ab, + "failed to setup filter for monitor buf %d\n", ret); + return ret; + } + } + + return 0; +} diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.h b/drivers/net/wireless/ath/ath12k/dp_tx.h new file mode 100644 index 000000000000..436d77e5e9ee --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/dp_tx.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef ATH12K_DP_TX_H +#define ATH12K_DP_TX_H + +#include "core.h" +#include "hal_tx.h" + +struct ath12k_dp_htt_wbm_tx_status { + bool acked; + int ack_rssi; +}; + +int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab); +int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif, + struct sk_buff *skb); +void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id); + +int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask); +int +ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type, + struct htt_ext_stats_cfg_params *cfg_params, + u64 cookie); +int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset); + +int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id, + int mac_id, enum hal_ring_type ring_type, + int rx_buf_size, + struct htt_rx_ring_tlv_filter *tlv_filter); +void ath12k_dp_tx_put_bank_profile(struct ath12k_dp *dp, u8 bank_id); +int ath12k_dp_tx_htt_tx_filter_setup(struct ath12k_base *ab, u32 ring_id, + int mac_id, enum hal_ring_type ring_type, + int tx_buf_size, + struct htt_tx_ring_tlv_filter *htt_tlv_filter); +int ath12k_dp_tx_htt_tx_monitor_mode_ring_config(struct ath12k *ar, bool reset); +int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset); +#endif diff --git a/drivers/net/wireless/ath/ath12k/hal.c b/drivers/net/wireless/ath/ath12k/hal.c new file mode 100644 index 000000000000..95d04819083f --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/hal.c @@ -0,0 +1,2222 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ +#include <linux/dma-mapping.h> +#include "hal_tx.h" +#include "hal_rx.h" +#include "debug.h" +#include "hal_desc.h" +#include "hif.h" + +static const struct hal_srng_config hw_srng_config_template[] = { + /* TODO: max_rings can be populated by querying HW capabilities */ + [HAL_REO_DST] = { + .start_ring_id = HAL_SRNG_RING_ID_REO2SW1, + .max_rings = 8, + .entry_size = sizeof(struct hal_reo_dest_ring) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_DST, + .max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE, + }, + [HAL_REO_EXCEPTION] = { + /* Designating REO2SW0 ring as exception ring. + * Any of the REO2SW rings can be used as exception ring.
+ */ + .start_ring_id = HAL_SRNG_RING_ID_REO2SW0, + .max_rings = 1, + .entry_size = sizeof(struct hal_reo_dest_ring) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_DST, + .max_size = HAL_REO_REO2SW0_RING_BASE_MSB_RING_SIZE, + }, + [HAL_REO_REINJECT] = { + .start_ring_id = HAL_SRNG_RING_ID_SW2REO, + .max_rings = 4, + .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE, + }, + [HAL_REO_CMD] = { + .start_ring_id = HAL_SRNG_RING_ID_REO_CMD, + .max_rings = 1, + .entry_size = (sizeof(struct hal_tlv_64_hdr) + + sizeof(struct hal_reo_get_queue_stats)) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE, + }, + [HAL_REO_STATUS] = { + .start_ring_id = HAL_SRNG_RING_ID_REO_STATUS, + .max_rings = 1, + .entry_size = (sizeof(struct hal_tlv_64_hdr) + + sizeof(struct hal_reo_get_queue_stats_status)) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_DST, + .max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE, + }, + [HAL_TCL_DATA] = { + .start_ring_id = HAL_SRNG_RING_ID_SW2TCL1, + .max_rings = 6, + .entry_size = sizeof(struct hal_tcl_data_cmd) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE, + }, + [HAL_TCL_CMD] = { + .start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD, + .max_rings = 1, + .entry_size = sizeof(struct hal_tcl_gse_cmd) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE, + }, + [HAL_TCL_STATUS] = { + .start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS, + .max_rings = 1, + .entry_size = (sizeof(struct hal_tlv_hdr) + + sizeof(struct hal_tcl_status_ring)) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_DST, + .max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE, + }, + [HAL_CE_SRC] = { + .start_ring_id = HAL_SRNG_RING_ID_CE0_SRC, + .max_rings = 16, + .entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE, + }, + [HAL_CE_DST] = { + .start_ring_id = HAL_SRNG_RING_ID_CE0_DST, + .max_rings = 16, + .entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE, + }, + [HAL_CE_DST_STATUS] = { + .start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS, + .max_rings = 16, + .entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_DST, + .max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE, + }, + [HAL_WBM_IDLE_LINK] = { + .start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK, + .max_rings = 1, + .entry_size = sizeof(struct hal_wbm_link_desc) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE, + }, + [HAL_SW2WBM_RELEASE] = { + .start_ring_id = HAL_SRNG_RING_ID_WBM_SW0_RELEASE, + .max_rings = 2, + .entry_size = sizeof(struct hal_wbm_release_ring) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE, + }, + [HAL_WBM2SW_RELEASE] = { + .start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE, + .max_rings = 8, + .entry_size = sizeof(struct hal_wbm_release_ring) >> 2, + .mac_type = 
ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_DST, + .max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE, + }, + [HAL_RXDMA_BUF] = { + .start_ring_id = HAL_SRNG_SW2RXDMA_BUF0, + .max_rings = 1, + .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2, + .mac_type = ATH12K_HAL_SRNG_DMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_RXDMA_RING_MAX_SIZE_BE, + }, + [HAL_RXDMA_DST] = { + .start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0, + .max_rings = 0, + .entry_size = 0, + .mac_type = ATH12K_HAL_SRNG_PMAC, + .ring_dir = HAL_SRNG_DIR_DST, + .max_size = HAL_RXDMA_RING_MAX_SIZE_BE, + }, + [HAL_RXDMA_MONITOR_BUF] = { + .start_ring_id = HAL_SRNG_SW2RXMON_BUF0, + .max_rings = 1, + .entry_size = sizeof(struct hal_mon_buf_ring) >> 2, + .mac_type = ATH12K_HAL_SRNG_PMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_RXDMA_RING_MAX_SIZE_BE, + }, + [HAL_RXDMA_MONITOR_STATUS] = { 0, }, + [HAL_RXDMA_MONITOR_DESC] = { 0, }, + [HAL_RXDMA_DIR_BUF] = { + .start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF, + .max_rings = 2, + .entry_size = 8 >> 2, /* TODO: Define the struct */ + .mac_type = ATH12K_HAL_SRNG_PMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_RXDMA_RING_MAX_SIZE_BE, + }, + [HAL_PPE2TCL] = { + .start_ring_id = HAL_SRNG_RING_ID_PPE2TCL1, + .max_rings = 1, + .entry_size = sizeof(struct hal_tcl_entrance_from_ppe_ring) >> 2, + .mac_type = ATH12K_HAL_SRNG_PMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE, + }, + [HAL_PPE_RELEASE] = { + .start_ring_id = HAL_SRNG_RING_ID_WBM_PPE_RELEASE, + .max_rings = 1, + .entry_size = sizeof(struct hal_wbm_release_ring) >> 2, + .mac_type = ATH12K_HAL_SRNG_PMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_WBM2PPE_RELEASE_RING_BASE_MSB_RING_SIZE, + }, + [HAL_TX_MONITOR_BUF] = { + .start_ring_id = HAL_SRNG_SW2TXMON_BUF0, + .max_rings = 1, + .entry_size = sizeof(struct hal_mon_buf_ring) >> 2, + .mac_type = ATH12K_HAL_SRNG_PMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_RXDMA_RING_MAX_SIZE_BE, + }, + [HAL_RXDMA_MONITOR_DST] = { + .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXMON_BUF0, + .max_rings = 1, + .entry_size = sizeof(struct hal_mon_dest_desc) >> 2, + .mac_type = ATH12K_HAL_SRNG_PMAC, + .ring_dir = HAL_SRNG_DIR_DST, + .max_size = HAL_RXDMA_RING_MAX_SIZE_BE, + }, + [HAL_TX_MONITOR_DST] = { + .start_ring_id = HAL_SRNG_RING_ID_WMAC1_TXMON2SW0_BUF0, + .max_rings = 1, + .entry_size = sizeof(struct hal_mon_dest_desc) >> 2, + .mac_type = ATH12K_HAL_SRNG_PMAC, + .ring_dir = HAL_SRNG_DIR_DST, + .max_size = HAL_RXDMA_RING_MAX_SIZE_BE, + } +}; + +static const struct ath12k_hal_tcl_to_wbm_rbm_map +ath12k_hal_qcn9274_tcl_to_wbm_rbm_map[DP_TCL_NUM_RING_MAX] = { + { + .wbm_ring_num = 0, + .rbm_id = HAL_RX_BUF_RBM_SW0_BM, + }, + { + .wbm_ring_num = 1, + .rbm_id = HAL_RX_BUF_RBM_SW1_BM, + }, + { + .wbm_ring_num = 2, + .rbm_id = HAL_RX_BUF_RBM_SW2_BM, + }, + { + .wbm_ring_num = 4, + .rbm_id = HAL_RX_BUF_RBM_SW4_BM, + } +}; + +static const struct ath12k_hal_tcl_to_wbm_rbm_map +ath12k_hal_wcn7850_tcl_to_wbm_rbm_map[DP_TCL_NUM_RING_MAX] = { + { + .wbm_ring_num = 0, + .rbm_id = HAL_RX_BUF_RBM_SW0_BM, + }, + { + .wbm_ring_num = 2, + .rbm_id = HAL_RX_BUF_RBM_SW2_BM, + }, + { + .wbm_ring_num = 4, + .rbm_id = HAL_RX_BUF_RBM_SW4_BM, + }, +}; + +static unsigned int ath12k_hal_reo1_ring_id_offset(struct ath12k_base *ab) +{ + return HAL_REO1_RING_ID(ab) - HAL_REO1_RING_BASE_LSB(ab); +} + +static unsigned int ath12k_hal_reo1_ring_msi1_base_lsb_offset(struct ath12k_base *ab) +{ + return HAL_REO1_RING_MSI1_BASE_LSB(ab) - 
HAL_REO1_RING_BASE_LSB(ab); +} + +static unsigned int ath12k_hal_reo1_ring_msi1_base_msb_offset(struct ath12k_base *ab) +{ + return HAL_REO1_RING_MSI1_BASE_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab); +} + +static unsigned int ath12k_hal_reo1_ring_msi1_data_offset(struct ath12k_base *ab) +{ + return HAL_REO1_RING_MSI1_DATA(ab) - HAL_REO1_RING_BASE_LSB(ab); +} + +static unsigned int ath12k_hal_reo1_ring_base_msb_offset(struct ath12k_base *ab) +{ + return HAL_REO1_RING_BASE_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab); +} + +static unsigned int ath12k_hal_reo1_ring_producer_int_setup_offset(struct ath12k_base *ab) +{ + return HAL_REO1_RING_PRODUCER_INT_SETUP(ab) - HAL_REO1_RING_BASE_LSB(ab); +} + +static unsigned int ath12k_hal_reo1_ring_hp_addr_lsb_offset(struct ath12k_base *ab) +{ + return HAL_REO1_RING_HP_ADDR_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab); +} + +static unsigned int ath12k_hal_reo1_ring_hp_addr_msb_offset(struct ath12k_base *ab) +{ + return HAL_REO1_RING_HP_ADDR_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab); +} + +static unsigned int ath12k_hal_reo1_ring_misc_offset(struct ath12k_base *ab) +{ + return HAL_REO1_RING_MISC(ab) - HAL_REO1_RING_BASE_LSB(ab); +} + +static bool ath12k_hw_qcn9274_rx_desc_get_first_msdu(struct hal_rx_desc *desc) +{ + return !!le16_get_bits(desc->u.qcn9274.msdu_end.info5, + RX_MSDU_END_INFO5_FIRST_MSDU); +} + +static bool ath12k_hw_qcn9274_rx_desc_get_last_msdu(struct hal_rx_desc *desc) +{ + return !!le16_get_bits(desc->u.qcn9274.msdu_end.info5, + RX_MSDU_END_INFO5_LAST_MSDU); +} + +static u8 ath12k_hw_qcn9274_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc) +{ + return le16_get_bits(desc->u.qcn9274.msdu_end.info5, + RX_MSDU_END_INFO5_L3_HDR_PADDING); +} + +static bool ath12k_hw_qcn9274_rx_desc_encrypt_valid(struct hal_rx_desc *desc) +{ + return !!le32_get_bits(desc->u.qcn9274.mpdu_start.info4, + RX_MPDU_START_INFO4_ENCRYPT_INFO_VALID); +} + +static u32 ath12k_hw_qcn9274_rx_desc_get_encrypt_type(struct hal_rx_desc *desc) +{ + return le32_get_bits(desc->u.qcn9274.mpdu_start.info2, + RX_MPDU_START_INFO2_ENC_TYPE); +} + +static u8 ath12k_hw_qcn9274_rx_desc_get_decap_type(struct hal_rx_desc *desc) +{ + return le32_get_bits(desc->u.qcn9274.msdu_end.info11, + RX_MSDU_END_INFO11_DECAP_FORMAT); +} + +static u8 ath12k_hw_qcn9274_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc) +{ + return le32_get_bits(desc->u.qcn9274.msdu_end.info11, + RX_MSDU_END_INFO11_MESH_CTRL_PRESENT); +} + +static bool ath12k_hw_qcn9274_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc) +{ + return !!le32_get_bits(desc->u.qcn9274.mpdu_start.info4, + RX_MPDU_START_INFO4_MPDU_SEQ_CTRL_VALID); +} + +static bool ath12k_hw_qcn9274_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc) +{ + return !!le32_get_bits(desc->u.qcn9274.mpdu_start.info4, + RX_MPDU_START_INFO4_MPDU_FCTRL_VALID); +} + +static u16 ath12k_hw_qcn9274_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc) +{ + return le32_get_bits(desc->u.qcn9274.mpdu_start.info4, + RX_MPDU_START_INFO4_MPDU_SEQ_NUM); +} + +static u16 ath12k_hw_qcn9274_rx_desc_get_msdu_len(struct hal_rx_desc *desc) +{ + return le32_get_bits(desc->u.qcn9274.msdu_end.info10, + RX_MSDU_END_INFO10_MSDU_LENGTH); +} + +static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc) +{ + return le32_get_bits(desc->u.qcn9274.msdu_end.info12, + RX_MSDU_END_INFO12_SGI); +} + +static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc) +{ + return le32_get_bits(desc->u.qcn9274.msdu_end.info12, + RX_MSDU_END_INFO12_RATE_MCS); +} + +static u8 
ath12k_hw_qcn9274_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc) +{ + return le32_get_bits(desc->u.qcn9274.msdu_end.info12, + RX_MSDU_END_INFO12_RECV_BW); +} + +static u32 ath12k_hw_qcn9274_rx_desc_get_msdu_freq(struct hal_rx_desc *desc) +{ + return __le32_to_cpu(desc->u.qcn9274.msdu_end.phy_meta_data); +} + +static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc) +{ + return le32_get_bits(desc->u.qcn9274.msdu_end.info12, + RX_MSDU_END_INFO12_PKT_TYPE); +} + +static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_nss(struct hal_rx_desc *desc) +{ + return le32_get_bits(desc->u.qcn9274.msdu_end.info12, + RX_MSDU_END_INFO12_MIMO_SS_BITMAP); +} + +static u8 ath12k_hw_qcn9274_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc) +{ + return le16_get_bits(desc->u.qcn9274.msdu_end.info5, + RX_MSDU_END_INFO5_TID); +} + +static u16 ath12k_hw_qcn9274_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc) +{ + return __le16_to_cpu(desc->u.qcn9274.mpdu_start.sw_peer_id); +} + +static void ath12k_hw_qcn9274_rx_desc_copy_end_tlv(struct hal_rx_desc *fdesc, + struct hal_rx_desc *ldesc) +{ + memcpy(&fdesc->u.qcn9274.msdu_end, &ldesc->u.qcn9274.msdu_end, + sizeof(struct rx_msdu_end_qcn9274)); +} + +static u32 ath12k_hw_qcn9274_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc) +{ + return __le16_to_cpu(desc->u.qcn9274.mpdu_start.phy_ppdu_id); +} + +static void ath12k_hw_qcn9274_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len) +{ + u32 info = __le32_to_cpu(desc->u.qcn9274.msdu_end.info10); + + info &= ~RX_MSDU_END_INFO10_MSDU_LENGTH; + info |= u32_encode_bits(len, RX_MSDU_END_INFO10_MSDU_LENGTH); + + desc->u.qcn9274.msdu_end.info10 = __cpu_to_le32(info); +} + +static u8 *ath12k_hw_qcn9274_rx_desc_get_msdu_payload(struct hal_rx_desc *desc) +{ + return &desc->u.qcn9274.msdu_payload[0]; +} + +static u32 ath12k_hw_qcn9274_rx_desc_get_mpdu_start_offset(void) +{ + return offsetof(struct hal_rx_desc_qcn9274, mpdu_start); +} + +static u32 ath12k_hw_qcn9274_rx_desc_get_msdu_end_offset(void) +{ + return offsetof(struct hal_rx_desc_qcn9274, msdu_end); +} + +static bool ath12k_hw_qcn9274_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc) +{ + return __le32_to_cpu(desc->u.qcn9274.mpdu_start.info4) & + RX_MPDU_START_INFO4_MAC_ADDR2_VALID; +} + +static u8 *ath12k_hw_qcn9274_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc) +{ + return desc->u.qcn9274.mpdu_start.addr2; +} + +static bool ath12k_hw_qcn9274_rx_desc_is_mcbc(struct hal_rx_desc *desc) +{ + return __le32_to_cpu(desc->u.qcn9274.mpdu_start.info6) & + RX_MPDU_START_INFO6_MCAST_BCAST; +} + +static void ath12k_hw_qcn9274_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc, + struct ieee80211_hdr *hdr) +{ + hdr->frame_control = desc->u.qcn9274.mpdu_start.frame_ctrl; + hdr->duration_id = desc->u.qcn9274.mpdu_start.duration; + ether_addr_copy(hdr->addr1, desc->u.qcn9274.mpdu_start.addr1); + ether_addr_copy(hdr->addr2, desc->u.qcn9274.mpdu_start.addr2); + ether_addr_copy(hdr->addr3, desc->u.qcn9274.mpdu_start.addr3); + if (__le32_to_cpu(desc->u.qcn9274.mpdu_start.info4) & + RX_MPDU_START_INFO4_MAC_ADDR4_VALID) { + ether_addr_copy(hdr->addr4, desc->u.qcn9274.mpdu_start.addr4); + } + hdr->seq_ctrl = desc->u.qcn9274.mpdu_start.seq_ctrl; +} + +static void ath12k_hw_qcn9274_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc, + u8 *crypto_hdr, + enum hal_encrypt_type enctype) +{ + unsigned int key_id; + + switch (enctype) { + case HAL_ENCRYPT_TYPE_OPEN: + return; + case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: + case HAL_ENCRYPT_TYPE_TKIP_MIC: + crypto_hdr[0] = + 
HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274.mpdu_start.pn[0]); + crypto_hdr[1] = 0; + crypto_hdr[2] = + HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274.mpdu_start.pn[0]); + break; + case HAL_ENCRYPT_TYPE_CCMP_128: + case HAL_ENCRYPT_TYPE_CCMP_256: + case HAL_ENCRYPT_TYPE_GCMP_128: + case HAL_ENCRYPT_TYPE_AES_GCMP_256: + crypto_hdr[0] = + HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274.mpdu_start.pn[0]); + crypto_hdr[1] = + HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274.mpdu_start.pn[0]); + crypto_hdr[2] = 0; + break; + case HAL_ENCRYPT_TYPE_WEP_40: + case HAL_ENCRYPT_TYPE_WEP_104: + case HAL_ENCRYPT_TYPE_WEP_128: + case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: + case HAL_ENCRYPT_TYPE_WAPI: + return; + } + key_id = le32_get_bits(desc->u.qcn9274.mpdu_start.info5, + RX_MPDU_START_INFO5_KEY_ID); + crypto_hdr[3] = 0x20 | (key_id << 6); + crypto_hdr[4] = HAL_RX_MPDU_INFO_PN_GET_BYTE3(desc->u.qcn9274.mpdu_start.pn[0]); + crypto_hdr[5] = HAL_RX_MPDU_INFO_PN_GET_BYTE4(desc->u.qcn9274.mpdu_start.pn[0]); + crypto_hdr[6] = HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274.mpdu_start.pn[1]); + crypto_hdr[7] = HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274.mpdu_start.pn[1]); +} + +static u16 ath12k_hw_qcn9274_rx_desc_get_mpdu_frame_ctl(struct hal_rx_desc *desc) +{ + return __le16_to_cpu(desc->u.qcn9274.mpdu_start.frame_ctrl); +} + +static int ath12k_hal_srng_create_config_qcn9274(struct ath12k_base *ab) +{ + struct ath12k_hal *hal = &ab->hal; + struct hal_srng_config *s; + + hal->srng_config = kmemdup(hw_srng_config_template, + sizeof(hw_srng_config_template), + GFP_KERNEL); + if (!hal->srng_config) + return -ENOMEM; + + s = &hal->srng_config[HAL_REO_DST]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP; + s->reg_size[0] = HAL_REO2_RING_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab); + s->reg_size[1] = HAL_REO2_RING_HP - HAL_REO1_RING_HP; + + s = &hal->srng_config[HAL_REO_EXCEPTION]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_BASE_LSB(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_HP; + + s = &hal->srng_config[HAL_REO_REINJECT]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP; + s->reg_size[0] = HAL_SW2REO1_RING_BASE_LSB(ab) - HAL_SW2REO_RING_BASE_LSB(ab); + s->reg_size[1] = HAL_SW2REO1_RING_HP - HAL_SW2REO_RING_HP; + + s = &hal->srng_config[HAL_REO_CMD]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP; + + s = &hal->srng_config[HAL_REO_STATUS]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP; + + s = &hal->srng_config[HAL_TCL_DATA]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB; + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP; + s->reg_size[0] = HAL_TCL2_RING_BASE_LSB - HAL_TCL1_RING_BASE_LSB; + s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP; + + s = &hal->srng_config[HAL_TCL_CMD]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP; + + s = &hal->srng_config[HAL_TCL_STATUS]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP; + + s = &hal->srng_config[HAL_CE_SRC]; + 
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_BASE_LSB; + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_HP; + s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG - + HAL_SEQ_WCSS_UMAC_CE0_SRC_REG; + s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG - + HAL_SEQ_WCSS_UMAC_CE0_SRC_REG; + + s = &hal->srng_config[HAL_CE_DST]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_BASE_LSB; + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_HP; + s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG; + s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG; + + s = &hal->srng_config[HAL_CE_DST_STATUS]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + + HAL_CE_DST_STATUS_RING_BASE_LSB; + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_STATUS_RING_HP; + s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG; + s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG; + + s = &hal->srng_config[HAL_WBM_IDLE_LINK]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP; + + s = &hal->srng_config[HAL_SW2WBM_RELEASE]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + + HAL_WBM_SW_RELEASE_RING_BASE_LSB(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SW_RELEASE_RING_HP; + s->reg_size[0] = HAL_WBM_SW1_RELEASE_RING_BASE_LSB(ab) - + HAL_WBM_SW_RELEASE_RING_BASE_LSB(ab); + s->reg_size[1] = HAL_WBM_SW1_RELEASE_RING_HP - HAL_WBM_SW_RELEASE_RING_HP; + + s = &hal->srng_config[HAL_WBM2SW_RELEASE]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP; + s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(ab) - + HAL_WBM0_RELEASE_RING_BASE_LSB(ab); + s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP; + + /* Some LMAC rings are not accessed from the host: + * RXDMA_BUF, RXDMA_DST, RXDMA_MONITOR_BUF, RXDMA_MONITOR_STATUS, + * RXDMA_MONITOR_DST, RXDMA_MONITOR_DESC, RXDMA_DIR_BUF_SRC, + * RXDMA_RX_MONITOR_BUF, TX_MONITOR_BUF, TX_MONITOR_DST, SW2RXDMA + */ + s = &hal->srng_config[HAL_PPE2TCL]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_PPE2TCL1_RING_BASE_LSB; + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_PPE2TCL1_RING_HP; + + s = &hal->srng_config[HAL_PPE_RELEASE]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + + HAL_WBM_PPE_RELEASE_RING_BASE_LSB(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_PPE_RELEASE_RING_HP; + + return 0; +} + +static bool ath12k_hw_qcn9274_dp_rx_h_msdu_done(struct hal_rx_desc *desc) +{ + return !!le32_get_bits(desc->u.qcn9274.msdu_end.info14, + RX_MSDU_END_INFO14_MSDU_DONE); +} + +static bool ath12k_hw_qcn9274_dp_rx_h_l4_cksum_fail(struct hal_rx_desc *desc) +{ + return !!le32_get_bits(desc->u.qcn9274.msdu_end.info13, + RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL); +} + +static bool ath12k_hw_qcn9274_dp_rx_h_ip_cksum_fail(struct hal_rx_desc *desc) +{ + return !!le32_get_bits(desc->u.qcn9274.msdu_end.info13, + RX_MSDU_END_INFO13_IP_CKSUM_FAIL); +} + +static bool ath12k_hw_qcn9274_dp_rx_h_is_decrypted(struct hal_rx_desc *desc) +{ + return (le32_get_bits(desc->u.qcn9274.msdu_end.info14, + RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE) == + RX_DESC_DECRYPT_STATUS_CODE_OK); +} + +static u32 ath12k_hw_qcn9274_dp_rx_h_mpdu_err(struct hal_rx_desc *desc) +{ + u32 info =
__le32_to_cpu(desc->u.qcn9274.msdu_end.info13); + u32 errmap = 0; + + if (info & RX_MSDU_END_INFO13_FCS_ERR) + errmap |= HAL_RX_MPDU_ERR_FCS; + + if (info & RX_MSDU_END_INFO13_DECRYPT_ERR) + errmap |= HAL_RX_MPDU_ERR_DECRYPT; + + if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR) + errmap |= HAL_RX_MPDU_ERR_TKIP_MIC; + + if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR) + errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR; + + if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR) + errmap |= HAL_RX_MPDU_ERR_OVERFLOW; + + if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR) + errmap |= HAL_RX_MPDU_ERR_MSDU_LEN; + + if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR) + errmap |= HAL_RX_MPDU_ERR_MPDU_LEN; + + return errmap; +} + +const struct hal_ops hal_qcn9274_ops = { + .rx_desc_get_first_msdu = ath12k_hw_qcn9274_rx_desc_get_first_msdu, + .rx_desc_get_last_msdu = ath12k_hw_qcn9274_rx_desc_get_last_msdu, + .rx_desc_get_l3_pad_bytes = ath12k_hw_qcn9274_rx_desc_get_l3_pad_bytes, + .rx_desc_encrypt_valid = ath12k_hw_qcn9274_rx_desc_encrypt_valid, + .rx_desc_get_encrypt_type = ath12k_hw_qcn9274_rx_desc_get_encrypt_type, + .rx_desc_get_decap_type = ath12k_hw_qcn9274_rx_desc_get_decap_type, + .rx_desc_get_mesh_ctl = ath12k_hw_qcn9274_rx_desc_get_mesh_ctl, + .rx_desc_get_mpdu_seq_ctl_vld = ath12k_hw_qcn9274_rx_desc_get_mpdu_seq_ctl_vld, + .rx_desc_get_mpdu_fc_valid = ath12k_hw_qcn9274_rx_desc_get_mpdu_fc_valid, + .rx_desc_get_mpdu_start_seq_no = ath12k_hw_qcn9274_rx_desc_get_mpdu_start_seq_no, + .rx_desc_get_msdu_len = ath12k_hw_qcn9274_rx_desc_get_msdu_len, + .rx_desc_get_msdu_sgi = ath12k_hw_qcn9274_rx_desc_get_msdu_sgi, + .rx_desc_get_msdu_rate_mcs = ath12k_hw_qcn9274_rx_desc_get_msdu_rate_mcs, + .rx_desc_get_msdu_rx_bw = ath12k_hw_qcn9274_rx_desc_get_msdu_rx_bw, + .rx_desc_get_msdu_freq = ath12k_hw_qcn9274_rx_desc_get_msdu_freq, + .rx_desc_get_msdu_pkt_type = ath12k_hw_qcn9274_rx_desc_get_msdu_pkt_type, + .rx_desc_get_msdu_nss = ath12k_hw_qcn9274_rx_desc_get_msdu_nss, + .rx_desc_get_mpdu_tid = ath12k_hw_qcn9274_rx_desc_get_mpdu_tid, + .rx_desc_get_mpdu_peer_id = ath12k_hw_qcn9274_rx_desc_get_mpdu_peer_id, + .rx_desc_copy_end_tlv = ath12k_hw_qcn9274_rx_desc_copy_end_tlv, + .rx_desc_get_mpdu_ppdu_id = ath12k_hw_qcn9274_rx_desc_get_mpdu_ppdu_id, + .rx_desc_set_msdu_len = ath12k_hw_qcn9274_rx_desc_set_msdu_len, + .rx_desc_get_msdu_payload = ath12k_hw_qcn9274_rx_desc_get_msdu_payload, + .rx_desc_get_mpdu_start_offset = ath12k_hw_qcn9274_rx_desc_get_mpdu_start_offset, + .rx_desc_get_msdu_end_offset = ath12k_hw_qcn9274_rx_desc_get_msdu_end_offset, + .rx_desc_mac_addr2_valid = ath12k_hw_qcn9274_rx_desc_mac_addr2_valid, + .rx_desc_mpdu_start_addr2 = ath12k_hw_qcn9274_rx_desc_mpdu_start_addr2, + .rx_desc_is_mcbc = ath12k_hw_qcn9274_rx_desc_is_mcbc, + .rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_rx_desc_get_dot11_hdr, + .rx_desc_get_crypto_header = ath12k_hw_qcn9274_rx_desc_get_crypto_hdr, + .rx_desc_get_mpdu_frame_ctl = ath12k_hw_qcn9274_rx_desc_get_mpdu_frame_ctl, + .create_srng_config = ath12k_hal_srng_create_config_qcn9274, + .tcl_to_wbm_rbm_map = ath12k_hal_qcn9274_tcl_to_wbm_rbm_map, + .dp_rx_h_msdu_done = ath12k_hw_qcn9274_dp_rx_h_msdu_done, + .dp_rx_h_l4_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_l4_cksum_fail, + .dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_ip_cksum_fail, + .dp_rx_h_is_decrypted = ath12k_hw_qcn9274_dp_rx_h_is_decrypted, + .dp_rx_h_mpdu_err = ath12k_hw_qcn9274_dp_rx_h_mpdu_err, +}; + +static bool ath12k_hw_wcn7850_rx_desc_get_first_msdu(struct hal_rx_desc *desc) +{ + return !!le16_get_bits(desc->u.wcn7850.msdu_end.info5, + 
RX_MSDU_END_INFO5_FIRST_MSDU); +} + +static bool ath12k_hw_wcn7850_rx_desc_get_last_msdu(struct hal_rx_desc *desc) +{ + return !!le16_get_bits(desc->u.wcn7850.msdu_end.info5, + RX_MSDU_END_INFO5_LAST_MSDU); +} + +static u8 ath12k_hw_wcn7850_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc) +{ + return le16_get_bits(desc->u.wcn7850.msdu_end.info5, + RX_MSDU_END_INFO5_L3_HDR_PADDING); +} + +static bool ath12k_hw_wcn7850_rx_desc_encrypt_valid(struct hal_rx_desc *desc) +{ + return !!le32_get_bits(desc->u.wcn7850.mpdu_start.info4, + RX_MPDU_START_INFO4_ENCRYPT_INFO_VALID); +} + +static u32 ath12k_hw_wcn7850_rx_desc_get_encrypt_type(struct hal_rx_desc *desc) +{ + return le32_get_bits(desc->u.wcn7850.mpdu_start.info2, + RX_MPDU_START_INFO2_ENC_TYPE); +} + +static u8 ath12k_hw_wcn7850_rx_desc_get_decap_type(struct hal_rx_desc *desc) +{ + return le32_get_bits(desc->u.wcn7850.msdu_end.info11, + RX_MSDU_END_INFO11_DECAP_FORMAT); +} + +static u8 ath12k_hw_wcn7850_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc) +{ + return le32_get_bits(desc->u.wcn7850.msdu_end.info11, + RX_MSDU_END_INFO11_MESH_CTRL_PRESENT); +} + +static bool ath12k_hw_wcn7850_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc) +{ + return !!le32_get_bits(desc->u.wcn7850.mpdu_start.info4, + RX_MPDU_START_INFO4_MPDU_SEQ_CTRL_VALID); +} + +static bool ath12k_hw_wcn7850_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc) +{ + return !!le32_get_bits(desc->u.wcn7850.mpdu_start.info4, + RX_MPDU_START_INFO4_MPDU_FCTRL_VALID); +} + +static u16 ath12k_hw_wcn7850_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc) +{ + return le32_get_bits(desc->u.wcn7850.mpdu_start.info4, + RX_MPDU_START_INFO4_MPDU_SEQ_NUM); +} + +static u16 ath12k_hw_wcn7850_rx_desc_get_msdu_len(struct hal_rx_desc *desc) +{ + return le32_get_bits(desc->u.wcn7850.msdu_end.info10, + RX_MSDU_END_INFO10_MSDU_LENGTH); +} + +static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc) +{ + return le32_get_bits(desc->u.wcn7850.msdu_end.info12, + RX_MSDU_END_INFO12_SGI); +} + +static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc) +{ + return le32_get_bits(desc->u.wcn7850.msdu_end.info12, + RX_MSDU_END_INFO12_RATE_MCS); +} + +static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc) +{ + return le32_get_bits(desc->u.wcn7850.msdu_end.info12, + RX_MSDU_END_INFO12_RECV_BW); +} + +static u32 ath12k_hw_wcn7850_rx_desc_get_msdu_freq(struct hal_rx_desc *desc) +{ + return __le32_to_cpu(desc->u.wcn7850.msdu_end.phy_meta_data); +} + +static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc) +{ + return le32_get_bits(desc->u.wcn7850.msdu_end.info12, + RX_MSDU_END_INFO12_PKT_TYPE); +} + +static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_nss(struct hal_rx_desc *desc) +{ + return le32_get_bits(desc->u.wcn7850.msdu_end.info12, + RX_MSDU_END_INFO12_MIMO_SS_BITMAP); +} + +static u8 ath12k_hw_wcn7850_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc) +{ + return le16_get_bits(desc->u.wcn7850.msdu_end.info5, + RX_MSDU_END_INFO5_TID); +} + +static u16 ath12k_hw_wcn7850_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc) +{ + return __le16_to_cpu(desc->u.wcn7850.mpdu_start.sw_peer_id); +} + +static void ath12k_hw_wcn7850_rx_desc_copy_end_tlv(struct hal_rx_desc *fdesc, + struct hal_rx_desc *ldesc) +{ + memcpy(&fdesc->u.wcn7850.msdu_end, &ldesc->u.wcn7850.msdu_end, + sizeof(struct rx_msdu_end_qcn9274)); +} + +static u32 ath12k_hw_wcn7850_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc) +{ + return 
le64_get_bits(desc->u.wcn7850.mpdu_start_tag, + HAL_TLV_HDR_TAG); +} + +static u32 ath12k_hw_wcn7850_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc) +{ + return __le16_to_cpu(desc->u.wcn7850.mpdu_start.phy_ppdu_id); +} + +static void ath12k_hw_wcn7850_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len) +{ + u32 info = __le32_to_cpu(desc->u.wcn7850.msdu_end.info10); + + info &= ~RX_MSDU_END_INFO10_MSDU_LENGTH; + info |= u32_encode_bits(len, RX_MSDU_END_INFO10_MSDU_LENGTH); + + desc->u.wcn7850.msdu_end.info10 = __cpu_to_le32(info); +} + +static u8 *ath12k_hw_wcn7850_rx_desc_get_msdu_payload(struct hal_rx_desc *desc) +{ + return &desc->u.wcn7850.msdu_payload[0]; +} + +static u32 ath12k_hw_wcn7850_rx_desc_get_mpdu_start_offset(void) +{ + return offsetof(struct hal_rx_desc_wcn7850, mpdu_start_tag); +} + +static u32 ath12k_hw_wcn7850_rx_desc_get_msdu_end_offset(void) +{ + return offsetof(struct hal_rx_desc_wcn7850, msdu_end_tag); +} + +static bool ath12k_hw_wcn7850_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc) +{ + return __le32_to_cpu(desc->u.wcn7850.mpdu_start.info4) & + RX_MPDU_START_INFO4_MAC_ADDR2_VALID; +} + +static u8 *ath12k_hw_wcn7850_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc) +{ + return desc->u.wcn7850.mpdu_start.addr2; +} + +static bool ath12k_hw_wcn7850_rx_desc_is_mcbc(struct hal_rx_desc *desc) +{ + return __le32_to_cpu(desc->u.wcn7850.mpdu_start.info6) & + RX_MPDU_START_INFO6_MCAST_BCAST; +} + +static void ath12k_hw_wcn7850_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc, + struct ieee80211_hdr *hdr) +{ + hdr->frame_control = desc->u.wcn7850.mpdu_start.frame_ctrl; + hdr->duration_id = desc->u.wcn7850.mpdu_start.duration; + ether_addr_copy(hdr->addr1, desc->u.wcn7850.mpdu_start.addr1); + ether_addr_copy(hdr->addr2, desc->u.wcn7850.mpdu_start.addr2); + ether_addr_copy(hdr->addr3, desc->u.wcn7850.mpdu_start.addr3); + if (__le32_to_cpu(desc->u.wcn7850.mpdu_start.info4) & + RX_MPDU_START_INFO4_MAC_ADDR4_VALID) { + ether_addr_copy(hdr->addr4, desc->u.wcn7850.mpdu_start.addr4); + } + hdr->seq_ctrl = desc->u.wcn7850.mpdu_start.seq_ctrl; +} + +static void ath12k_hw_wcn7850_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc, + u8 *crypto_hdr, + enum hal_encrypt_type enctype) +{ + unsigned int key_id; + + switch (enctype) { + case HAL_ENCRYPT_TYPE_OPEN: + return; + case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: + case HAL_ENCRYPT_TYPE_TKIP_MIC: + crypto_hdr[0] = + HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.wcn7850.mpdu_start.pn[0]); + crypto_hdr[1] = 0; + crypto_hdr[2] = + HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.wcn7850.mpdu_start.pn[0]); + break; + case HAL_ENCRYPT_TYPE_CCMP_128: + case HAL_ENCRYPT_TYPE_CCMP_256: + case HAL_ENCRYPT_TYPE_GCMP_128: + case HAL_ENCRYPT_TYPE_AES_GCMP_256: + crypto_hdr[0] = + HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.wcn7850.mpdu_start.pn[0]); + crypto_hdr[1] = + HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.wcn7850.mpdu_start.pn[0]); + crypto_hdr[2] = 0; + break; + case HAL_ENCRYPT_TYPE_WEP_40: + case HAL_ENCRYPT_TYPE_WEP_104: + case HAL_ENCRYPT_TYPE_WEP_128: + case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: + case HAL_ENCRYPT_TYPE_WAPI: + return; + } + key_id = u32_get_bits(__le32_to_cpu(desc->u.wcn7850.mpdu_start.info5), + RX_MPDU_START_INFO5_KEY_ID); + crypto_hdr[3] = 0x20 | (key_id << 6); + crypto_hdr[4] = HAL_RX_MPDU_INFO_PN_GET_BYTE3(desc->u.wcn7850.mpdu_start.pn[0]); + crypto_hdr[5] = HAL_RX_MPDU_INFO_PN_GET_BYTE4(desc->u.wcn7850.mpdu_start.pn[0]); + crypto_hdr[6] = HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.wcn7850.mpdu_start.pn[1]); + crypto_hdr[7] = 
HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.wcn7850.mpdu_start.pn[1]); +} + +static u16 ath12k_hw_wcn7850_rx_desc_get_mpdu_frame_ctl(struct hal_rx_desc *desc) +{ + return __le16_to_cpu(desc->u.wcn7850.mpdu_start.frame_ctrl); +} + +static int ath12k_hal_srng_create_config_wcn7850(struct ath12k_base *ab) +{ + struct ath12k_hal *hal = &ab->hal; + struct hal_srng_config *s; + + hal->srng_config = kmemdup(hw_srng_config_template, + sizeof(hw_srng_config_template), + GFP_KERNEL); + if (!hal->srng_config) + return -ENOMEM; + + s = &hal->srng_config[HAL_REO_DST]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP; + s->reg_size[0] = HAL_REO2_RING_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab); + s->reg_size[1] = HAL_REO2_RING_HP - HAL_REO1_RING_HP; + + s = &hal->srng_config[HAL_REO_EXCEPTION]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_BASE_LSB(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_HP; + + s = &hal->srng_config[HAL_REO_REINJECT]; + s->max_rings = 1; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP; + + s = &hal->srng_config[HAL_REO_CMD]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP; + + s = &hal->srng_config[HAL_REO_STATUS]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP; + + s = &hal->srng_config[HAL_TCL_DATA]; + s->max_rings = 5; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB; + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP; + s->reg_size[0] = HAL_TCL2_RING_BASE_LSB - HAL_TCL1_RING_BASE_LSB; + s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP; + + s = &hal->srng_config[HAL_TCL_CMD]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP; + + s = &hal->srng_config[HAL_TCL_STATUS]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP; + + s = &hal->srng_config[HAL_CE_SRC]; + s->max_rings = 12; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_BASE_LSB; + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_HP; + s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG - + HAL_SEQ_WCSS_UMAC_CE0_SRC_REG; + s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG - + HAL_SEQ_WCSS_UMAC_CE0_SRC_REG; + + s = &hal->srng_config[HAL_CE_DST]; + s->max_rings = 12; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_BASE_LSB; + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_HP; + s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG; + s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG; + + s = &hal->srng_config[HAL_CE_DST_STATUS]; + s->max_rings = 12; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + + HAL_CE_DST_STATUS_RING_BASE_LSB; + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_STATUS_RING_HP; + s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG; + s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG; + + s = &hal->srng_config[HAL_WBM_IDLE_LINK]; + s->reg_start[0] = 
HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP; + + s = &hal->srng_config[HAL_SW2WBM_RELEASE]; + s->max_rings = 1; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + + HAL_WBM_SW_RELEASE_RING_BASE_LSB(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SW_RELEASE_RING_HP; + + s = &hal->srng_config[HAL_WBM2SW_RELEASE]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP; + s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(ab) - + HAL_WBM0_RELEASE_RING_BASE_LSB(ab); + s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP; + + s = &hal->srng_config[HAL_RXDMA_BUF]; + s->max_rings = 2; + s->mac_type = ATH12K_HAL_SRNG_PMAC; + + s = &hal->srng_config[HAL_RXDMA_DST]; + s->max_rings = 1; + s->entry_size = sizeof(struct hal_reo_entrance_ring) >> 2; + + /* below rings are not used */ + s = &hal->srng_config[HAL_RXDMA_DIR_BUF]; + s->max_rings = 0; + + s = &hal->srng_config[HAL_PPE2TCL]; + s->max_rings = 0; + + s = &hal->srng_config[HAL_PPE_RELEASE]; + s->max_rings = 0; + + s = &hal->srng_config[HAL_TX_MONITOR_BUF]; + s->max_rings = 0; + + s = &hal->srng_config[HAL_TX_MONITOR_DST]; + s->max_rings = 0; + + return 0; +} + +static bool ath12k_hw_wcn7850_dp_rx_h_msdu_done(struct hal_rx_desc *desc) +{ + return !!le32_get_bits(desc->u.wcn7850.msdu_end.info14, + RX_MSDU_END_INFO14_MSDU_DONE); +} + +static bool ath12k_hw_wcn7850_dp_rx_h_l4_cksum_fail(struct hal_rx_desc *desc) +{ + return !!le32_get_bits(desc->u.wcn7850.msdu_end.info13, + RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL); +} + +static bool ath12k_hw_wcn7850_dp_rx_h_ip_cksum_fail(struct hal_rx_desc *desc) +{ + return !!le32_get_bits(desc->u.wcn7850.msdu_end.info13, + RX_MSDU_END_INFO13_IP_CKSUM_FAIL); +} + +static bool ath12k_hw_wcn7850_dp_rx_h_is_decrypted(struct hal_rx_desc *desc) +{ + return (le32_get_bits(desc->u.wcn7850.msdu_end.info14, + RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE) == + RX_DESC_DECRYPT_STATUS_CODE_OK); +} + +static u32 ath12k_hw_wcn7850_dp_rx_h_mpdu_err(struct hal_rx_desc *desc) +{ + u32 info = __le32_to_cpu(desc->u.wcn7850.msdu_end.info13); + u32 errmap = 0; + + if (info & RX_MSDU_END_INFO13_FCS_ERR) + errmap |= HAL_RX_MPDU_ERR_FCS; + + if (info & RX_MSDU_END_INFO13_DECRYPT_ERR) + errmap |= HAL_RX_MPDU_ERR_DECRYPT; + + if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR) + errmap |= HAL_RX_MPDU_ERR_TKIP_MIC; + + if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR) + errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR; + + if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR) + errmap |= HAL_RX_MPDU_ERR_OVERFLOW; + + if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR) + errmap |= HAL_RX_MPDU_ERR_MSDU_LEN; + + if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR) + errmap |= HAL_RX_MPDU_ERR_MPDU_LEN; + + return errmap; +} + +const struct hal_ops hal_wcn7850_ops = { + .rx_desc_get_first_msdu = ath12k_hw_wcn7850_rx_desc_get_first_msdu, + .rx_desc_get_last_msdu = ath12k_hw_wcn7850_rx_desc_get_last_msdu, + .rx_desc_get_l3_pad_bytes = ath12k_hw_wcn7850_rx_desc_get_l3_pad_bytes, + .rx_desc_encrypt_valid = ath12k_hw_wcn7850_rx_desc_encrypt_valid, + .rx_desc_get_encrypt_type = ath12k_hw_wcn7850_rx_desc_get_encrypt_type, + .rx_desc_get_decap_type = ath12k_hw_wcn7850_rx_desc_get_decap_type, + .rx_desc_get_mesh_ctl = ath12k_hw_wcn7850_rx_desc_get_mesh_ctl, + .rx_desc_get_mpdu_seq_ctl_vld = ath12k_hw_wcn7850_rx_desc_get_mpdu_seq_ctl_vld, +
.rx_desc_get_mpdu_fc_valid = ath12k_hw_wcn7850_rx_desc_get_mpdu_fc_valid, + .rx_desc_get_mpdu_start_seq_no = ath12k_hw_wcn7850_rx_desc_get_mpdu_start_seq_no, + .rx_desc_get_msdu_len = ath12k_hw_wcn7850_rx_desc_get_msdu_len, + .rx_desc_get_msdu_sgi = ath12k_hw_wcn7850_rx_desc_get_msdu_sgi, + .rx_desc_get_msdu_rate_mcs = ath12k_hw_wcn7850_rx_desc_get_msdu_rate_mcs, + .rx_desc_get_msdu_rx_bw = ath12k_hw_wcn7850_rx_desc_get_msdu_rx_bw, + .rx_desc_get_msdu_freq = ath12k_hw_wcn7850_rx_desc_get_msdu_freq, + .rx_desc_get_msdu_pkt_type = ath12k_hw_wcn7850_rx_desc_get_msdu_pkt_type, + .rx_desc_get_msdu_nss = ath12k_hw_wcn7850_rx_desc_get_msdu_nss, + .rx_desc_get_mpdu_tid = ath12k_hw_wcn7850_rx_desc_get_mpdu_tid, + .rx_desc_get_mpdu_peer_id = ath12k_hw_wcn7850_rx_desc_get_mpdu_peer_id, + .rx_desc_copy_end_tlv = ath12k_hw_wcn7850_rx_desc_copy_end_tlv, + .rx_desc_get_mpdu_start_tag = ath12k_hw_wcn7850_rx_desc_get_mpdu_start_tag, + .rx_desc_get_mpdu_ppdu_id = ath12k_hw_wcn7850_rx_desc_get_mpdu_ppdu_id, + .rx_desc_set_msdu_len = ath12k_hw_wcn7850_rx_desc_set_msdu_len, + .rx_desc_get_msdu_payload = ath12k_hw_wcn7850_rx_desc_get_msdu_payload, + .rx_desc_get_mpdu_start_offset = ath12k_hw_wcn7850_rx_desc_get_mpdu_start_offset, + .rx_desc_get_msdu_end_offset = ath12k_hw_wcn7850_rx_desc_get_msdu_end_offset, + .rx_desc_mac_addr2_valid = ath12k_hw_wcn7850_rx_desc_mac_addr2_valid, + .rx_desc_mpdu_start_addr2 = ath12k_hw_wcn7850_rx_desc_mpdu_start_addr2, + .rx_desc_is_mcbc = ath12k_hw_wcn7850_rx_desc_is_mcbc, + .rx_desc_get_dot11_hdr = ath12k_hw_wcn7850_rx_desc_get_dot11_hdr, + .rx_desc_get_crypto_header = ath12k_hw_wcn7850_rx_desc_get_crypto_hdr, + .rx_desc_get_mpdu_frame_ctl = ath12k_hw_wcn7850_rx_desc_get_mpdu_frame_ctl, + .create_srng_config = ath12k_hal_srng_create_config_wcn7850, + .tcl_to_wbm_rbm_map = ath12k_hal_wcn7850_tcl_to_wbm_rbm_map, + .dp_rx_h_msdu_done = ath12k_hw_wcn7850_dp_rx_h_msdu_done, + .dp_rx_h_l4_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_l4_cksum_fail, + .dp_rx_h_ip_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_ip_cksum_fail, + .dp_rx_h_is_decrypted = ath12k_hw_wcn7850_dp_rx_h_is_decrypted, + .dp_rx_h_mpdu_err = ath12k_hw_wcn7850_dp_rx_h_mpdu_err, +}; + +static int ath12k_hal_alloc_cont_rdp(struct ath12k_base *ab) +{ + struct ath12k_hal *hal = &ab->hal; + size_t size; + + size = sizeof(u32) * HAL_SRNG_RING_ID_MAX; + hal->rdp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->rdp.paddr, + GFP_KERNEL); + if (!hal->rdp.vaddr) + return -ENOMEM; + + return 0; +} + +static void ath12k_hal_free_cont_rdp(struct ath12k_base *ab) +{ + struct ath12k_hal *hal = &ab->hal; + size_t size; + + if (!hal->rdp.vaddr) + return; + + size = sizeof(u32) * HAL_SRNG_RING_ID_MAX; + dma_free_coherent(ab->dev, size, + hal->rdp.vaddr, hal->rdp.paddr); + hal->rdp.vaddr = NULL; +} + +static int ath12k_hal_alloc_cont_wrp(struct ath12k_base *ab) +{ + struct ath12k_hal *hal = &ab->hal; + size_t size; + + size = sizeof(u32) * (HAL_SRNG_NUM_PMAC_RINGS + HAL_SRNG_NUM_DMAC_RINGS); + hal->wrp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->wrp.paddr, + GFP_KERNEL); + if (!hal->wrp.vaddr) + return -ENOMEM; + + return 0; +} + +static void ath12k_hal_free_cont_wrp(struct ath12k_base *ab) +{ + struct ath12k_hal *hal = &ab->hal; + size_t size; + + if (!hal->wrp.vaddr) + return; + + size = sizeof(u32) * (HAL_SRNG_NUM_PMAC_RINGS + HAL_SRNG_NUM_DMAC_RINGS); + dma_free_coherent(ab->dev, size, + hal->wrp.vaddr, hal->wrp.paddr); + hal->wrp.vaddr = NULL; +} + +static void ath12k_hal_ce_dst_setup(struct ath12k_base *ab, + struct hal_srng *srng, int 
ring_num) +{ + struct hal_srng_config *srng_config = &ab->hal.srng_config[HAL_CE_DST]; + u32 addr; + u32 val; + + addr = HAL_CE_DST_RING_CTRL + + srng_config->reg_start[HAL_SRNG_REG_GRP_R0] + + ring_num * srng_config->reg_size[HAL_SRNG_REG_GRP_R0]; + + val = ath12k_hif_read32(ab, addr); + val &= ~HAL_CE_DST_R0_DEST_CTRL_MAX_LEN; + val |= u32_encode_bits(srng->u.dst_ring.max_buffer_length, + HAL_CE_DST_R0_DEST_CTRL_MAX_LEN); + ath12k_hif_write32(ab, addr, val); +} + +static void ath12k_hal_srng_dst_hw_init(struct ath12k_base *ab, + struct hal_srng *srng) +{ + struct ath12k_hal *hal = &ab->hal; + u32 val; + u64 hp_addr; + u32 reg_base; + + reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0]; + + if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) { + ath12k_hif_write32(ab, reg_base + + ath12k_hal_reo1_ring_msi1_base_lsb_offset(ab), + srng->msi_addr); + + val = u32_encode_bits(((u64)srng->msi_addr >> HAL_ADDR_MSB_REG_SHIFT), + HAL_REO1_RING_MSI1_BASE_MSB_ADDR) | + HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE; + ath12k_hif_write32(ab, reg_base + + ath12k_hal_reo1_ring_msi1_base_msb_offset(ab), val); + + ath12k_hif_write32(ab, + reg_base + ath12k_hal_reo1_ring_msi1_data_offset(ab), + srng->msi_data); + } + + ath12k_hif_write32(ab, reg_base, srng->ring_base_paddr); + + val = u32_encode_bits(((u64)srng->ring_base_paddr >> HAL_ADDR_MSB_REG_SHIFT), + HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB) | + u32_encode_bits((srng->entry_size * srng->num_entries), + HAL_REO1_RING_BASE_MSB_RING_SIZE); + ath12k_hif_write32(ab, reg_base + ath12k_hal_reo1_ring_base_msb_offset(ab), val); + + val = u32_encode_bits(srng->ring_id, HAL_REO1_RING_ID_RING_ID) | + u32_encode_bits(srng->entry_size, HAL_REO1_RING_ID_ENTRY_SIZE); + ath12k_hif_write32(ab, reg_base + ath12k_hal_reo1_ring_id_offset(ab), val); + + /* interrupt setup */ + val = u32_encode_bits((srng->intr_timer_thres_us >> 3), + HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD); + + val |= u32_encode_bits((srng->intr_batch_cntr_thres_entries * srng->entry_size), + HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD); + + ath12k_hif_write32(ab, + reg_base + ath12k_hal_reo1_ring_producer_int_setup_offset(ab), + val); + + hp_addr = hal->rdp.paddr + + ((unsigned long)srng->u.dst_ring.hp_addr - + (unsigned long)hal->rdp.vaddr); + ath12k_hif_write32(ab, reg_base + ath12k_hal_reo1_ring_hp_addr_lsb_offset(ab), + hp_addr & HAL_ADDR_LSB_REG_MASK); + ath12k_hif_write32(ab, reg_base + ath12k_hal_reo1_ring_hp_addr_msb_offset(ab), + hp_addr >> HAL_ADDR_MSB_REG_SHIFT); + + /* Initialize head and tail pointers to indicate ring is empty */ + reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2]; + ath12k_hif_write32(ab, reg_base, 0); + ath12k_hif_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET, 0); + *srng->u.dst_ring.hp_addr = 0; + + reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0]; + val = 0; + if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP) + val |= HAL_REO1_RING_MISC_DATA_TLV_SWAP; + if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP) + val |= HAL_REO1_RING_MISC_HOST_FW_SWAP; + if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP) + val |= HAL_REO1_RING_MISC_MSI_SWAP; + val |= HAL_REO1_RING_MISC_SRNG_ENABLE; + + ath12k_hif_write32(ab, reg_base + ath12k_hal_reo1_ring_misc_offset(ab), val); +} + +static void ath12k_hal_srng_src_hw_init(struct ath12k_base *ab, + struct hal_srng *srng) +{ + struct ath12k_hal *hal = &ab->hal; + u32 val; + u64 tp_addr; + u32 reg_base; + + reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0]; + + if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) { + ath12k_hif_write32(ab, reg_base + + 
HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab), + srng->msi_addr); + + val = u32_encode_bits(((u64)srng->msi_addr >> HAL_ADDR_MSB_REG_SHIFT), + HAL_TCL1_RING_MSI1_BASE_MSB_ADDR) | + HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE; + ath12k_hif_write32(ab, reg_base + + HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab), + val); + + ath12k_hif_write32(ab, reg_base + + HAL_TCL1_RING_MSI1_DATA_OFFSET(ab), + srng->msi_data); + } + + ath12k_hif_write32(ab, reg_base, srng->ring_base_paddr); + + val = u32_encode_bits(((u64)srng->ring_base_paddr >> HAL_ADDR_MSB_REG_SHIFT), + HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB) | + u32_encode_bits((srng->entry_size * srng->num_entries), + HAL_TCL1_RING_BASE_MSB_RING_SIZE); + ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET, val); + + val = u32_encode_bits(srng->entry_size, HAL_REO1_RING_ID_ENTRY_SIZE); + ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET(ab), val); + + val = u32_encode_bits(srng->intr_timer_thres_us, + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD); + + val |= u32_encode_bits((srng->intr_batch_cntr_thres_entries * srng->entry_size), + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD); + + ath12k_hif_write32(ab, + reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab), + val); + + val = 0; + if (srng->flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) { + val |= u32_encode_bits(srng->u.src_ring.low_threshold, + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD); + } + ath12k_hif_write32(ab, + reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab), + val); + + if (srng->ring_id != HAL_SRNG_RING_ID_WBM_IDLE_LINK) { + tp_addr = hal->rdp.paddr + + ((unsigned long)srng->u.src_ring.tp_addr - + (unsigned long)hal->rdp.vaddr); + ath12k_hif_write32(ab, + reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab), + tp_addr & HAL_ADDR_LSB_REG_MASK); + ath12k_hif_write32(ab, + reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab), + tp_addr >> HAL_ADDR_MSB_REG_SHIFT); + } + + /* Initialize head and tail pointers to indicate ring is empty */ + reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2]; + ath12k_hif_write32(ab, reg_base, 0); + ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_TP_OFFSET, 0); + *srng->u.src_ring.tp_addr = 0; + + reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0]; + val = 0; + if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP) + val |= HAL_TCL1_RING_MISC_DATA_TLV_SWAP; + if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP) + val |= HAL_TCL1_RING_MISC_HOST_FW_SWAP; + if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP) + val |= HAL_TCL1_RING_MISC_MSI_SWAP; + + /* Loop count is not used for SRC rings */ + val |= HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE; + + val |= HAL_TCL1_RING_MISC_SRNG_ENABLE; + + if (srng->ring_id == HAL_SRNG_RING_ID_WBM_IDLE_LINK) + val |= HAL_TCL1_RING_MISC_MSI_RING_ID_DISABLE; + + ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET(ab), val); +} + +static void ath12k_hal_srng_hw_init(struct ath12k_base *ab, + struct hal_srng *srng) +{ + if (srng->ring_dir == HAL_SRNG_DIR_SRC) + ath12k_hal_srng_src_hw_init(ab, srng); + else + ath12k_hal_srng_dst_hw_init(ab, srng); +} + +static int ath12k_hal_srng_get_ring_id(struct ath12k_base *ab, + enum hal_ring_type type, + int ring_num, int mac_id) +{ + struct hal_srng_config *srng_config = &ab->hal.srng_config[type]; + int ring_id; + + if (ring_num >= srng_config->max_rings) { + ath12k_warn(ab, "invalid ring number :%d\n", ring_num); + return -EINVAL; + } + + ring_id = srng_config->start_ring_id + ring_num; + if (srng_config->mac_type == ATH12K_HAL_SRNG_PMAC) + ring_id += mac_id * HAL_SRNG_RINGS_PER_PMAC; + + if 
(WARN_ON(ring_id >= HAL_SRNG_RING_ID_MAX)) + return -EINVAL; + + return ring_id; +} + +int ath12k_hal_srng_get_entrysize(struct ath12k_base *ab, u32 ring_type) +{ + struct hal_srng_config *srng_config; + + if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES)) + return -EINVAL; + + srng_config = &ab->hal.srng_config[ring_type]; + + return (srng_config->entry_size << 2); +} + +int ath12k_hal_srng_get_max_entries(struct ath12k_base *ab, u32 ring_type) +{ + struct hal_srng_config *srng_config; + + if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES)) + return -EINVAL; + + srng_config = &ab->hal.srng_config[ring_type]; + + return (srng_config->max_size / srng_config->entry_size); +} + +void ath12k_hal_srng_get_params(struct ath12k_base *ab, struct hal_srng *srng, + struct hal_srng_params *params) +{ + params->ring_base_paddr = srng->ring_base_paddr; + params->ring_base_vaddr = srng->ring_base_vaddr; + params->num_entries = srng->num_entries; + params->intr_timer_thres_us = srng->intr_timer_thres_us; + params->intr_batch_cntr_thres_entries = + srng->intr_batch_cntr_thres_entries; + params->low_threshold = srng->u.src_ring.low_threshold; + params->msi_addr = srng->msi_addr; + params->msi2_addr = srng->msi2_addr; + params->msi_data = srng->msi_data; + params->msi2_data = srng->msi2_data; + params->flags = srng->flags; +} + +dma_addr_t ath12k_hal_srng_get_hp_addr(struct ath12k_base *ab, + struct hal_srng *srng) +{ + if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING)) + return 0; + + if (srng->ring_dir == HAL_SRNG_DIR_SRC) + return ab->hal.wrp.paddr + + ((unsigned long)srng->u.src_ring.hp_addr - + (unsigned long)ab->hal.wrp.vaddr); + else + return ab->hal.rdp.paddr + + ((unsigned long)srng->u.dst_ring.hp_addr - + (unsigned long)ab->hal.rdp.vaddr); +} + +dma_addr_t ath12k_hal_srng_get_tp_addr(struct ath12k_base *ab, + struct hal_srng *srng) +{ + if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING)) + return 0; + + if (srng->ring_dir == HAL_SRNG_DIR_SRC) + return ab->hal.rdp.paddr + + ((unsigned long)srng->u.src_ring.tp_addr - + (unsigned long)ab->hal.rdp.vaddr); + else + return ab->hal.wrp.paddr + + ((unsigned long)srng->u.dst_ring.tp_addr - + (unsigned long)ab->hal.wrp.vaddr); +} + +u32 ath12k_hal_ce_get_desc_size(enum hal_ce_desc type) +{ + switch (type) { + case HAL_CE_DESC_SRC: + return sizeof(struct hal_ce_srng_src_desc); + case HAL_CE_DESC_DST: + return sizeof(struct hal_ce_srng_dest_desc); + case HAL_CE_DESC_DST_STATUS: + return sizeof(struct hal_ce_srng_dst_status_desc); + } + + return 0; +} + +void ath12k_hal_ce_src_set_desc(struct hal_ce_srng_src_desc *desc, dma_addr_t paddr, + u32 len, u32 id, u8 byte_swap_data) +{ + desc->buffer_addr_low = cpu_to_le32(paddr & HAL_ADDR_LSB_REG_MASK); + desc->buffer_addr_info = + le32_encode_bits(((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT), + HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI) | + le32_encode_bits(byte_swap_data, + HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP) | + le32_encode_bits(0, HAL_CE_SRC_DESC_ADDR_INFO_GATHER) | + le32_encode_bits(len, HAL_CE_SRC_DESC_ADDR_INFO_LEN); + desc->meta_info = le32_encode_bits(id, HAL_CE_SRC_DESC_META_INFO_DATA); +} + +void ath12k_hal_ce_dst_set_desc(struct hal_ce_srng_dest_desc *desc, dma_addr_t paddr) +{ + desc->buffer_addr_low = cpu_to_le32(paddr & HAL_ADDR_LSB_REG_MASK); + desc->buffer_addr_info = + le32_encode_bits(((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT), + HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI); +} + +u32 ath12k_hal_ce_dst_status_get_length(struct hal_ce_srng_dst_status_desc *desc) +{ + u32 len; + + len = le32_get_bits(desc->flags, 
HAL_CE_DST_STATUS_DESC_FLAGS_LEN); + desc->flags &= ~cpu_to_le32(HAL_CE_DST_STATUS_DESC_FLAGS_LEN); + + return len; +} + +void ath12k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie, + dma_addr_t paddr) +{ + desc->buf_addr_info.info0 = le32_encode_bits((paddr & HAL_ADDR_LSB_REG_MASK), + BUFFER_ADDR_INFO0_ADDR); + desc->buf_addr_info.info1 = + le32_encode_bits(((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT), + BUFFER_ADDR_INFO1_ADDR) | + le32_encode_bits(1, BUFFER_ADDR_INFO1_RET_BUF_MGR) | + le32_encode_bits(cookie, BUFFER_ADDR_INFO1_SW_COOKIE); +} + +void *ath12k_hal_srng_dst_peek(struct ath12k_base *ab, struct hal_srng *srng) +{ + lockdep_assert_held(&srng->lock); + + if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) + return (srng->ring_base_vaddr + srng->u.dst_ring.tp); + + return NULL; +} + +void *ath12k_hal_srng_dst_get_next_entry(struct ath12k_base *ab, + struct hal_srng *srng) +{ + void *desc; + + lockdep_assert_held(&srng->lock); + + if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp) + return NULL; + + desc = srng->ring_base_vaddr + srng->u.dst_ring.tp; + + srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) % + srng->ring_size; + + return desc; +} + +int ath12k_hal_srng_dst_num_free(struct ath12k_base *ab, struct hal_srng *srng, + bool sync_hw_ptr) +{ + u32 tp, hp; + + lockdep_assert_held(&srng->lock); + + tp = srng->u.dst_ring.tp; + + if (sync_hw_ptr) { + hp = *srng->u.dst_ring.hp_addr; + srng->u.dst_ring.cached_hp = hp; + } else { + hp = srng->u.dst_ring.cached_hp; + } + + if (hp >= tp) + return (hp - tp) / srng->entry_size; + else + return (srng->ring_size - tp + hp) / srng->entry_size; +} + +/* Returns number of available entries in src ring */ +int ath12k_hal_srng_src_num_free(struct ath12k_base *ab, struct hal_srng *srng, + bool sync_hw_ptr) +{ + u32 tp, hp; + + lockdep_assert_held(&srng->lock); + + hp = srng->u.src_ring.hp; + + if (sync_hw_ptr) { + tp = *srng->u.src_ring.tp_addr; + srng->u.src_ring.cached_tp = tp; + } else { + tp = srng->u.src_ring.cached_tp; + } + + if (tp > hp) + return ((tp - hp) / srng->entry_size) - 1; + else + return ((srng->ring_size - hp + tp) / srng->entry_size) - 1; +} + +void *ath12k_hal_srng_src_get_next_entry(struct ath12k_base *ab, + struct hal_srng *srng) +{ + void *desc; + u32 next_hp; + + lockdep_assert_held(&srng->lock); + + /* TODO: Using % is expensive, but we have to do this since size of some + * SRNG rings is not power of 2 (due to descriptor sizes). Need to see + * if separate function is defined for rings having power of 2 ring size + * (TCL2SW, REO2SW, SW2RXDMA and CE rings) so that we can avoid the + * overhead of % by using mask (with &). + */ + next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size; + + if (next_hp == srng->u.src_ring.cached_tp) + return NULL; + + desc = srng->ring_base_vaddr + srng->u.src_ring.hp; + srng->u.src_ring.hp = next_hp; + + /* TODO: Reap functionality is not used by all rings. If particular + * ring does not use reap functionality, we need not update reap_hp + * with next_hp pointer. Need to make sure a separate function is used + * before doing any optimization by removing below code updating + * reap_hp. 
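+	 * (reap_hp lets a caller reserve ("reap") entries ahead of
+	 * publishing hp: see ath12k_hal_srng_src_reap_next() and
+	 * ath12k_hal_srng_src_get_next_reaped() below. hp itself only
+	 * becomes visible to HW in ath12k_hal_srng_access_end().)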
+ */ + srng->u.src_ring.reap_hp = next_hp; + + return desc; +} + +void *ath12k_hal_srng_src_reap_next(struct ath12k_base *ab, + struct hal_srng *srng) +{ + void *desc; + u32 next_reap_hp; + + lockdep_assert_held(&srng->lock); + + next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) % + srng->ring_size; + + if (next_reap_hp == srng->u.src_ring.cached_tp) + return NULL; + + desc = srng->ring_base_vaddr + next_reap_hp; + srng->u.src_ring.reap_hp = next_reap_hp; + + return desc; +} + +void *ath12k_hal_srng_src_get_next_reaped(struct ath12k_base *ab, + struct hal_srng *srng) +{ + void *desc; + + lockdep_assert_held(&srng->lock); + + if (srng->u.src_ring.hp == srng->u.src_ring.reap_hp) + return NULL; + + desc = srng->ring_base_vaddr + srng->u.src_ring.hp; + srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) % + srng->ring_size; + + return desc; +} + +void ath12k_hal_srng_access_begin(struct ath12k_base *ab, struct hal_srng *srng) +{ + lockdep_assert_held(&srng->lock); + + if (srng->ring_dir == HAL_SRNG_DIR_SRC) + srng->u.src_ring.cached_tp = + *(volatile u32 *)srng->u.src_ring.tp_addr; + else + srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr; +} + +/* Update cached ring head/tail pointers to HW. ath12k_hal_srng_access_begin() + * should have been called before this. + */ +void ath12k_hal_srng_access_end(struct ath12k_base *ab, struct hal_srng *srng) +{ + lockdep_assert_held(&srng->lock); + + /* TODO: See if we need a write memory barrier here */ + if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) { + /* For LMAC rings, ring pointer updates are done through FW and + * hence written to a shared memory location that is read by FW + */ + if (srng->ring_dir == HAL_SRNG_DIR_SRC) { + srng->u.src_ring.last_tp = + *(volatile u32 *)srng->u.src_ring.tp_addr; + *srng->u.src_ring.hp_addr = srng->u.src_ring.hp; + } else { + srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr; + *srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp; + } + } else { + if (srng->ring_dir == HAL_SRNG_DIR_SRC) { + srng->u.src_ring.last_tp = + *(volatile u32 *)srng->u.src_ring.tp_addr; + ath12k_hif_write32(ab, + (unsigned long)srng->u.src_ring.hp_addr - + (unsigned long)ab->mem, + srng->u.src_ring.hp); + } else { + srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr; + ath12k_hif_write32(ab, + (unsigned long)srng->u.dst_ring.tp_addr - + (unsigned long)ab->mem, + srng->u.dst_ring.tp); + } + } + + srng->timestamp = jiffies; +} + +void ath12k_hal_setup_link_idle_list(struct ath12k_base *ab, + struct hal_wbm_idle_scatter_list *sbuf, + u32 nsbufs, u32 tot_link_desc, + u32 end_offset) +{ + struct ath12k_buffer_addr *link_addr; + int i; + u32 reg_scatter_buf_sz = HAL_WBM_IDLE_SCATTER_BUF_SIZE / 64; + u32 val; + + link_addr = (void *)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE; + + for (i = 1; i < nsbufs; i++) { + link_addr->info0 = cpu_to_le32(sbuf[i].paddr & HAL_ADDR_LSB_REG_MASK); + + link_addr->info1 = + le32_encode_bits((u64)sbuf[i].paddr >> HAL_ADDR_MSB_REG_SHIFT, + HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32) | + le32_encode_bits(BASE_ADDR_MATCH_TAG_VAL, + HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG); + + link_addr = (void *)sbuf[i].vaddr + + HAL_WBM_IDLE_SCATTER_BUF_SIZE; + } + + val = u32_encode_bits(reg_scatter_buf_sz, HAL_WBM_SCATTER_BUFFER_SIZE) | + u32_encode_bits(0x1, HAL_WBM_LINK_DESC_IDLE_LIST_MODE); + + ath12k_hif_write32(ab, + HAL_SEQ_WCSS_UMAC_WBM_REG + + HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR(ab), + val); + + val = u32_encode_bits(reg_scatter_buf_sz * nsbufs, + 
HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST); + ath12k_hif_write32(ab, + HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_SIZE_ADDR(ab), + val); + + val = u32_encode_bits(sbuf[0].paddr & HAL_ADDR_LSB_REG_MASK, + BUFFER_ADDR_INFO0_ADDR); + ath12k_hif_write32(ab, + HAL_SEQ_WCSS_UMAC_WBM_REG + + HAL_WBM_SCATTERED_RING_BASE_LSB(ab), + val); + + val = u32_encode_bits(BASE_ADDR_MATCH_TAG_VAL, + HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG) | + u32_encode_bits((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT, + HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32); + ath12k_hif_write32(ab, + HAL_SEQ_WCSS_UMAC_WBM_REG + + HAL_WBM_SCATTERED_RING_BASE_MSB(ab), + val); + + /* Setup head and tail pointers for the idle list */ + val = u32_encode_bits(sbuf[nsbufs - 1].paddr, BUFFER_ADDR_INFO0_ADDR); + ath12k_hif_write32(ab, + HAL_SEQ_WCSS_UMAC_WBM_REG + + HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0(ab), + val); + + val = u32_encode_bits(((u64)sbuf[nsbufs - 1].paddr >> HAL_ADDR_MSB_REG_SHIFT), + HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32) | + u32_encode_bits((end_offset >> 2), + HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1); + ath12k_hif_write32(ab, + HAL_SEQ_WCSS_UMAC_WBM_REG + + HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1(ab), + val); + + val = u32_encode_bits(sbuf[0].paddr, BUFFER_ADDR_INFO0_ADDR); + ath12k_hif_write32(ab, + HAL_SEQ_WCSS_UMAC_WBM_REG + + HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0(ab), + val); + + val = u32_encode_bits(sbuf[0].paddr, BUFFER_ADDR_INFO0_ADDR); + ath12k_hif_write32(ab, + HAL_SEQ_WCSS_UMAC_WBM_REG + + HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0(ab), + val); + + val = u32_encode_bits(((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT), + HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32) | + u32_encode_bits(0, HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1); + ath12k_hif_write32(ab, + HAL_SEQ_WCSS_UMAC_WBM_REG + + HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1(ab), + val); + + val = 2 * tot_link_desc; + ath12k_hif_write32(ab, + HAL_SEQ_WCSS_UMAC_WBM_REG + + HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR(ab), + val); + + /* Enable the SRNG */ + val = u32_encode_bits(1, HAL_WBM_IDLE_LINK_RING_MISC_SRNG_ENABLE) | + u32_encode_bits(1, HAL_WBM_IDLE_LINK_RING_MISC_RIND_ID_DISABLE); + ath12k_hif_write32(ab, + HAL_SEQ_WCSS_UMAC_WBM_REG + + HAL_WBM_IDLE_LINK_RING_MISC_ADDR(ab), + val); +} + +int ath12k_hal_srng_setup(struct ath12k_base *ab, enum hal_ring_type type, + int ring_num, int mac_id, + struct hal_srng_params *params) +{ + struct ath12k_hal *hal = &ab->hal; + struct hal_srng_config *srng_config = &ab->hal.srng_config[type]; + struct hal_srng *srng; + int ring_id; + u32 idx; + int i; + u32 reg_base; + + ring_id = ath12k_hal_srng_get_ring_id(ab, type, ring_num, mac_id); + if (ring_id < 0) + return ring_id; + + srng = &hal->srng_list[ring_id]; + + srng->ring_id = ring_id; + srng->ring_dir = srng_config->ring_dir; + srng->ring_base_paddr = params->ring_base_paddr; + srng->ring_base_vaddr = params->ring_base_vaddr; + srng->entry_size = srng_config->entry_size; + srng->num_entries = params->num_entries; + srng->ring_size = srng->entry_size * srng->num_entries; + srng->intr_batch_cntr_thres_entries = + params->intr_batch_cntr_thres_entries; + srng->intr_timer_thres_us = params->intr_timer_thres_us; + srng->flags = params->flags; + srng->msi_addr = params->msi_addr; + srng->msi2_addr = params->msi2_addr; + srng->msi_data = params->msi_data; + srng->msi2_data = params->msi2_data; + srng->initialized = 1; + spin_lock_init(&srng->lock); + lockdep_set_class(&srng->lock, &srng->lock_key); + + for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) { + 
srng->hwreg_base[i] = srng_config->reg_start[i] + + (ring_num * srng_config->reg_size[i]); + } + + memset(srng->ring_base_vaddr, 0, + (srng->entry_size * srng->num_entries) << 2); + + reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2]; + + if (srng->ring_dir == HAL_SRNG_DIR_SRC) { + srng->u.src_ring.hp = 0; + srng->u.src_ring.cached_tp = 0; + srng->u.src_ring.reap_hp = srng->ring_size - srng->entry_size; + srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id); + srng->u.src_ring.low_threshold = params->low_threshold * + srng->entry_size; + if (srng_config->mac_type == ATH12K_HAL_SRNG_UMAC) { + if (!ab->hw_params->supports_shadow_regs) + srng->u.src_ring.hp_addr = + (u32 *)((unsigned long)ab->mem + reg_base); + else + ath12k_dbg(ab, ATH12K_DBG_HAL, + "hal type %d ring_num %d reg_base 0x%x shadow 0x%lx\n", + type, ring_num, + reg_base, + (unsigned long)srng->u.src_ring.hp_addr - + (unsigned long)ab->mem); + } else { + idx = ring_id - HAL_SRNG_RING_ID_DMAC_CMN_ID_START; + srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr + + idx); + srng->flags |= HAL_SRNG_FLAGS_LMAC_RING; + } + } else { + /* During initialization loop count in all the descriptors + * will be set to zero, and HW will set it to 1 on completing + * descriptor update in first loop, and increments it by 1 on + * subsequent loops (loop count wraps around after reaching + * 0xffff). The 'loop_cnt' in SW ring state is the expected + * loop count in descriptors updated by HW (to be processed + * by SW). + */ + srng->u.dst_ring.loop_cnt = 1; + srng->u.dst_ring.tp = 0; + srng->u.dst_ring.cached_hp = 0; + srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id); + if (srng_config->mac_type == ATH12K_HAL_SRNG_UMAC) { + if (!ab->hw_params->supports_shadow_regs) + srng->u.dst_ring.tp_addr = + (u32 *)((unsigned long)ab->mem + reg_base + + (HAL_REO1_RING_TP - HAL_REO1_RING_HP)); + else + ath12k_dbg(ab, ATH12K_DBG_HAL, + "type %d ring_num %d target_reg 0x%x shadow 0x%lx\n", + type, ring_num, + reg_base + HAL_REO1_RING_TP - HAL_REO1_RING_HP, + (unsigned long)srng->u.dst_ring.tp_addr - + (unsigned long)ab->mem); + } else { + /* For PMAC & DMAC rings, tail pointer updates will be done + * through FW by writing to a shared memory location + */ + idx = ring_id - HAL_SRNG_RING_ID_DMAC_CMN_ID_START; + srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr + + idx); + srng->flags |= HAL_SRNG_FLAGS_LMAC_RING; + } + } + + if (srng_config->mac_type != ATH12K_HAL_SRNG_UMAC) + return ring_id; + + ath12k_hal_srng_hw_init(ab, srng); + + if (type == HAL_CE_DST) { + srng->u.dst_ring.max_buffer_length = params->max_buffer_len; + ath12k_hal_ce_dst_setup(ab, srng, ring_num); + } + + return ring_id; +} + +static void ath12k_hal_srng_update_hp_tp_addr(struct ath12k_base *ab, + int shadow_cfg_idx, + enum hal_ring_type ring_type, + int ring_num) +{ + struct hal_srng *srng; + struct ath12k_hal *hal = &ab->hal; + int ring_id; + struct hal_srng_config *srng_config = &hal->srng_config[ring_type]; + + ring_id = ath12k_hal_srng_get_ring_id(ab, ring_type, ring_num, 0); + if (ring_id < 0) + return; + + srng = &hal->srng_list[ring_id]; + + if (srng_config->ring_dir == HAL_SRNG_DIR_DST) + srng->u.dst_ring.tp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) + + (unsigned long)ab->mem); + else + srng->u.src_ring.hp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) + + (unsigned long)ab->mem); +} + +int ath12k_hal_srng_update_shadow_config(struct ath12k_base *ab, + enum hal_ring_type ring_type, + int ring_num) +{ + struct ath12k_hal *hal = &ab->hal; + struct hal_srng_config 
*srng_config = &hal->srng_config[ring_type];
+	int shadow_cfg_idx = hal->num_shadow_reg_configured;
+	u32 target_reg;
+
+	if (shadow_cfg_idx >= HAL_SHADOW_NUM_REGS)
+		return -EINVAL;
+
+	hal->num_shadow_reg_configured++;
+
+	target_reg = srng_config->reg_start[HAL_HP_OFFSET_IN_REG_START];
+	target_reg += srng_config->reg_size[HAL_HP_OFFSET_IN_REG_START] *
+		      ring_num;
+
+	/* For destination ring, shadow the TP */
+	if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
+		target_reg += HAL_OFFSET_FROM_HP_TO_TP;
+
+	hal->shadow_reg_addr[shadow_cfg_idx] = target_reg;
+
+	/* Update the hp/tp address in the hal structure */
+	ath12k_hal_srng_update_hp_tp_addr(ab, shadow_cfg_idx, ring_type,
+					  ring_num);
+
+	ath12k_dbg(ab, ATH12K_DBG_HAL,
+		   "target_reg %x, shadow reg 0x%x shadow_idx 0x%x, ring_type %d, ring num %d",
+		   target_reg,
+		   HAL_SHADOW_REG(shadow_cfg_idx),
+		   shadow_cfg_idx,
+		   ring_type, ring_num);
+
+	return 0;
+}
+
+void ath12k_hal_srng_shadow_config(struct ath12k_base *ab)
+{
+	struct ath12k_hal *hal = &ab->hal;
+	int ring_type, ring_num;
+
+	/* update all the non-CE srngs. */
+	for (ring_type = 0; ring_type < HAL_MAX_RING_TYPES; ring_type++) {
+		struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
+
+		if (ring_type == HAL_CE_SRC ||
+		    ring_type == HAL_CE_DST ||
+		    ring_type == HAL_CE_DST_STATUS)
+			continue;
+
+		if (srng_config->mac_type == ATH12K_HAL_SRNG_DMAC ||
+		    srng_config->mac_type == ATH12K_HAL_SRNG_PMAC)
+			continue;
+
+		for (ring_num = 0; ring_num < srng_config->max_rings; ring_num++)
+			ath12k_hal_srng_update_shadow_config(ab, ring_type, ring_num);
+	}
+}
+
+void ath12k_hal_srng_get_shadow_config(struct ath12k_base *ab,
+				       u32 **cfg, u32 *len)
+{
+	struct ath12k_hal *hal = &ab->hal;
+
+	*len = hal->num_shadow_reg_configured;
+	*cfg = hal->shadow_reg_addr;
+}
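
A minimal stand-alone sketch (not part of this patch) of the shadow-register address math used above, assuming only the constants that hal.h defines later in this patch; it shows how each configured shadow index maps to a fixed BAR0 offset:

    #include <stdio.h>

    #define HAL_SHADOW_BASE_ADDR 0x000008fc
    #define HAL_SHADOW_NUM_REGS  40
    #define HAL_SHADOW_REG(x)    (HAL_SHADOW_BASE_ADDR + (4 * (x)))

    int main(void)
    {
            int idx;

            /* each call to ath12k_hal_srng_update_shadow_config()
             * consumes the next shadow slot in order
             */
            for (idx = 0; idx < HAL_SHADOW_NUM_REGS; idx += 13)
                    printf("shadow reg %2d -> BAR0 offset 0x%04x\n",
                           idx, HAL_SHADOW_REG(idx));
            return 0;
    }

+
+void ath12k_hal_srng_shadow_update_hp_tp(struct ath12k_base *ab,
+					 struct hal_srng *srng)
+{
+	lockdep_assert_held(&srng->lock);
+
+	/* check whether the ring is empty. Update the shadow
+	 * HP only when the ring isn't empty.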
+ */ + if (srng->ring_dir == HAL_SRNG_DIR_SRC && + *srng->u.src_ring.tp_addr != srng->u.src_ring.hp) + ath12k_hal_srng_access_end(ab, srng); +} + +static void ath12k_hal_register_srng_lock_keys(struct ath12k_base *ab) +{ + struct ath12k_hal *hal = &ab->hal; + u32 ring_id; + + for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++) + lockdep_register_key(&hal->srng_list[ring_id].lock_key); +} + +static void ath12k_hal_unregister_srng_lock_keys(struct ath12k_base *ab) +{ + struct ath12k_hal *hal = &ab->hal; + u32 ring_id; + + for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++) + lockdep_unregister_key(&hal->srng_list[ring_id].lock_key); +} + +int ath12k_hal_srng_init(struct ath12k_base *ab) +{ + struct ath12k_hal *hal = &ab->hal; + int ret; + + memset(hal, 0, sizeof(*hal)); + + ret = ab->hw_params->hal_ops->create_srng_config(ab); + if (ret) + goto err_hal; + + ret = ath12k_hal_alloc_cont_rdp(ab); + if (ret) + goto err_hal; + + ret = ath12k_hal_alloc_cont_wrp(ab); + if (ret) + goto err_free_cont_rdp; + + ath12k_hal_register_srng_lock_keys(ab); + + return 0; + +err_free_cont_rdp: + ath12k_hal_free_cont_rdp(ab); + +err_hal: + return ret; +} + +void ath12k_hal_srng_deinit(struct ath12k_base *ab) +{ + struct ath12k_hal *hal = &ab->hal; + + ath12k_hal_unregister_srng_lock_keys(ab); + ath12k_hal_free_cont_rdp(ab); + ath12k_hal_free_cont_wrp(ab); + kfree(hal->srng_config); + hal->srng_config = NULL; +} + +void ath12k_hal_dump_srng_stats(struct ath12k_base *ab) +{ + struct hal_srng *srng; + struct ath12k_ext_irq_grp *irq_grp; + struct ath12k_ce_pipe *ce_pipe; + int i; + + ath12k_err(ab, "Last interrupt received for each CE:\n"); + for (i = 0; i < ab->hw_params->ce_count; i++) { + ce_pipe = &ab->ce.ce_pipe[i]; + + if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; + + ath12k_err(ab, "CE_id %d pipe_num %d %ums before\n", + i, ce_pipe->pipe_num, + jiffies_to_msecs(jiffies - ce_pipe->timestamp)); + } + + ath12k_err(ab, "\nLast interrupt received for each group:\n"); + for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { + irq_grp = &ab->ext_irq_grp[i]; + ath12k_err(ab, "group_id %d %ums before\n", + irq_grp->grp_id, + jiffies_to_msecs(jiffies - irq_grp->timestamp)); + } + + for (i = 0; i < HAL_SRNG_RING_ID_MAX; i++) { + srng = &ab->hal.srng_list[i]; + + if (!srng->initialized) + continue; + + if (srng->ring_dir == HAL_SRNG_DIR_SRC) + ath12k_err(ab, + "src srng id %u hp %u, reap_hp %u, cur tp %u, cached tp %u last tp %u napi processed before %ums\n", + srng->ring_id, srng->u.src_ring.hp, + srng->u.src_ring.reap_hp, + *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp, + srng->u.src_ring.last_tp, + jiffies_to_msecs(jiffies - srng->timestamp)); + else if (srng->ring_dir == HAL_SRNG_DIR_DST) + ath12k_err(ab, + "dst srng id %u tp %u, cur hp %u, cached hp %u last hp %u napi processed before %ums\n", + srng->ring_id, srng->u.dst_ring.tp, + *srng->u.dst_ring.hp_addr, + srng->u.dst_ring.cached_hp, + srng->u.dst_ring.last_hp, + jiffies_to_msecs(jiffies - srng->timestamp)); + } +} diff --git a/drivers/net/wireless/ath/ath12k/hal.h b/drivers/net/wireless/ath/ath12k/hal.h new file mode 100644 index 000000000000..dfbd8bce70e5 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/hal.h @@ -0,0 +1,1142 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef ATH12K_HAL_H +#define ATH12K_HAL_H + +#include "hal_desc.h" +#include "rx_desc.h" + +struct ath12k_base; + +#define HAL_LINK_DESC_SIZE (32 << 2) +#define HAL_LINK_DESC_ALIGN 128 +#define HAL_NUM_MPDUS_PER_LINK_DESC 6 +#define HAL_NUM_TX_MSDUS_PER_LINK_DESC 7 +#define HAL_NUM_RX_MSDUS_PER_LINK_DESC 6 +#define HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC 12 +#define HAL_MAX_AVAIL_BLK_RES 3 + +#define HAL_RING_BASE_ALIGN 8 + +#define HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX 32704 +/* TODO: Check with hw team on the supported scatter buf size */ +#define HAL_WBM_IDLE_SCATTER_NEXT_PTR_SIZE 8 +#define HAL_WBM_IDLE_SCATTER_BUF_SIZE (HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX - \ + HAL_WBM_IDLE_SCATTER_NEXT_PTR_SIZE) + +/* TODO: 16 entries per radio times MAX_VAPS_SUPPORTED */ +#define HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX 32 +#define HAL_DSCP_TID_TBL_SIZE 24 + +/* calculate the register address from bar0 of shadow register x */ +#define HAL_SHADOW_BASE_ADDR 0x000008fc +#define HAL_SHADOW_NUM_REGS 40 +#define HAL_HP_OFFSET_IN_REG_START 1 +#define HAL_OFFSET_FROM_HP_TO_TP 4 + +#define HAL_SHADOW_REG(x) (HAL_SHADOW_BASE_ADDR + (4 * (x))) + +/* WCSS Relative address */ +#define HAL_SEQ_WCSS_UMAC_OFFSET 0x00a00000 +#define HAL_SEQ_WCSS_UMAC_REO_REG 0x00a38000 +#define HAL_SEQ_WCSS_UMAC_TCL_REG 0x00a44000 +#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG 0x01b80000 +#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG 0x01b81000 +#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG 0x01b82000 +#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG 0x01b83000 +#define HAL_SEQ_WCSS_UMAC_WBM_REG 0x00a34000 + +#define HAL_CE_WFSS_CE_REG_BASE 0x01b80000 + +#define HAL_TCL_SW_CONFIG_BANK_ADDR 0x00a4408c + +/* SW2TCL(x) R0 ring configuration address */ +#define HAL_TCL1_RING_CMN_CTRL_REG 0x00000020 +#define HAL_TCL1_RING_DSCP_TID_MAP 0x00000240 +#define HAL_TCL1_RING_BASE_LSB 0x00000900 +#define HAL_TCL1_RING_BASE_MSB 0x00000904 +#define HAL_TCL1_RING_ID(ab) ((ab)->hw_params->regs->hal_tcl1_ring_id) +#define HAL_TCL1_RING_MISC(ab) \ + ((ab)->hw_params->regs->hal_tcl1_ring_misc) +#define HAL_TCL1_RING_TP_ADDR_LSB(ab) \ + ((ab)->hw_params->regs->hal_tcl1_ring_tp_addr_lsb) +#define HAL_TCL1_RING_TP_ADDR_MSB(ab) \ + ((ab)->hw_params->regs->hal_tcl1_ring_tp_addr_msb) +#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(ab) \ + ((ab)->hw_params->regs->hal_tcl1_ring_consumer_int_setup_ix0) +#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(ab) \ + ((ab)->hw_params->regs->hal_tcl1_ring_consumer_int_setup_ix1) +#define HAL_TCL1_RING_MSI1_BASE_LSB(ab) \ + ((ab)->hw_params->regs->hal_tcl1_ring_msi1_base_lsb) +#define HAL_TCL1_RING_MSI1_BASE_MSB(ab) \ + ((ab)->hw_params->regs->hal_tcl1_ring_msi1_base_msb) +#define HAL_TCL1_RING_MSI1_DATA(ab) \ + ((ab)->hw_params->regs->hal_tcl1_ring_msi1_data) +#define HAL_TCL2_RING_BASE_LSB 0x00000978 +#define HAL_TCL_RING_BASE_LSB(ab) \ + ((ab)->hw_params->regs->hal_tcl_ring_base_lsb) + +#define HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab) \ + (HAL_TCL1_RING_MSI1_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB) +#define HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab) \ + (HAL_TCL1_RING_MSI1_BASE_MSB(ab) - HAL_TCL1_RING_BASE_LSB) +#define HAL_TCL1_RING_MSI1_DATA_OFFSET(ab) \ + (HAL_TCL1_RING_MSI1_DATA(ab) - HAL_TCL1_RING_BASE_LSB) +#define HAL_TCL1_RING_BASE_MSB_OFFSET \ + (HAL_TCL1_RING_BASE_MSB - HAL_TCL1_RING_BASE_LSB) +#define HAL_TCL1_RING_ID_OFFSET(ab) \ + (HAL_TCL1_RING_ID(ab) - HAL_TCL1_RING_BASE_LSB) +#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab) \ + (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(ab) - HAL_TCL1_RING_BASE_LSB) +#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab) \ + 
(HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(ab) - HAL_TCL1_RING_BASE_LSB) +#define HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab) \ + (HAL_TCL1_RING_TP_ADDR_LSB(ab) - HAL_TCL1_RING_BASE_LSB) +#define HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab) \ + (HAL_TCL1_RING_TP_ADDR_MSB(ab) - HAL_TCL1_RING_BASE_LSB) +#define HAL_TCL1_RING_MISC_OFFSET(ab) \ + (HAL_TCL1_RING_MISC(ab) - HAL_TCL1_RING_BASE_LSB) + +/* SW2TCL(x) R2 ring pointers (head/tail) address */ +#define HAL_TCL1_RING_HP 0x00002000 +#define HAL_TCL1_RING_TP 0x00002004 +#define HAL_TCL2_RING_HP 0x00002008 +#define HAL_TCL_RING_HP 0x00002028 + +#define HAL_TCL1_RING_TP_OFFSET \ + (HAL_TCL1_RING_TP - HAL_TCL1_RING_HP) + +/* TCL STATUS ring address */ +#define HAL_TCL_STATUS_RING_BASE_LSB(ab) \ + ((ab)->hw_params->regs->hal_tcl_status_ring_base_lsb) +#define HAL_TCL_STATUS_RING_HP 0x00002048 + +/* PPE2TCL1 Ring address */ +#define HAL_TCL_PPE2TCL1_RING_BASE_LSB 0x00000c48 +#define HAL_TCL_PPE2TCL1_RING_HP 0x00002038 + +/* WBM PPE Release Ring address */ +#define HAL_WBM_PPE_RELEASE_RING_BASE_LSB(ab) \ + ((ab)->hw_params->regs->hal_ppe_rel_ring_base) +#define HAL_WBM_PPE_RELEASE_RING_HP 0x00003020 + +/* REO2SW(x) R0 ring configuration address */ +#define HAL_REO1_GEN_ENABLE 0x00000000 +#define HAL_REO1_MISC_CTRL_ADDR(ab) \ + ((ab)->hw_params->regs->hal_reo1_misc_ctrl_addr) +#define HAL_REO1_DEST_RING_CTRL_IX_0 0x00000004 +#define HAL_REO1_DEST_RING_CTRL_IX_1 0x00000008 +#define HAL_REO1_DEST_RING_CTRL_IX_2 0x0000000c +#define HAL_REO1_DEST_RING_CTRL_IX_3 0x00000010 +#define HAL_REO1_SW_COOKIE_CFG0(ab) ((ab)->hw_params->regs->hal_reo1_sw_cookie_cfg0) +#define HAL_REO1_SW_COOKIE_CFG1(ab) ((ab)->hw_params->regs->hal_reo1_sw_cookie_cfg1) +#define HAL_REO1_QDESC_LUT_BASE0(ab) ((ab)->hw_params->regs->hal_reo1_qdesc_lut_base0) +#define HAL_REO1_QDESC_LUT_BASE1(ab) ((ab)->hw_params->regs->hal_reo1_qdesc_lut_base1) +#define HAL_REO1_RING_BASE_LSB(ab) ((ab)->hw_params->regs->hal_reo1_ring_base_lsb) +#define HAL_REO1_RING_BASE_MSB(ab) ((ab)->hw_params->regs->hal_reo1_ring_base_msb) +#define HAL_REO1_RING_ID(ab) ((ab)->hw_params->regs->hal_reo1_ring_id) +#define HAL_REO1_RING_MISC(ab) ((ab)->hw_params->regs->hal_reo1_ring_misc) +#define HAL_REO1_RING_HP_ADDR_LSB(ab) ((ab)->hw_params->regs->hal_reo1_ring_hp_addr_lsb) +#define HAL_REO1_RING_HP_ADDR_MSB(ab) ((ab)->hw_params->regs->hal_reo1_ring_hp_addr_msb) +#define HAL_REO1_RING_PRODUCER_INT_SETUP(ab) \ + ((ab)->hw_params->regs->hal_reo1_ring_producer_int_setup) +#define HAL_REO1_RING_MSI1_BASE_LSB(ab) \ + ((ab)->hw_params->regs->hal_reo1_ring_msi1_base_lsb) +#define HAL_REO1_RING_MSI1_BASE_MSB(ab) \ + ((ab)->hw_params->regs->hal_reo1_ring_msi1_base_msb) +#define HAL_REO1_RING_MSI1_DATA(ab) ((ab)->hw_params->regs->hal_reo1_ring_msi1_data) +#define HAL_REO2_RING_BASE_LSB(ab) ((ab)->hw_params->regs->hal_reo2_ring_base) +#define HAL_REO1_AGING_THRESH_IX_0(ab) ((ab)->hw_params->regs->hal_reo1_aging_thres_ix0) +#define HAL_REO1_AGING_THRESH_IX_1(ab) ((ab)->hw_params->regs->hal_reo1_aging_thres_ix1) +#define HAL_REO1_AGING_THRESH_IX_2(ab) ((ab)->hw_params->regs->hal_reo1_aging_thres_ix2) +#define HAL_REO1_AGING_THRESH_IX_3(ab) ((ab)->hw_params->regs->hal_reo1_aging_thres_ix3) + +/* REO2SW(x) R2 ring pointers (head/tail) address */ +#define HAL_REO1_RING_HP 0x00003048 +#define HAL_REO1_RING_TP 0x0000304c +#define HAL_REO2_RING_HP 0x00003050 + +#define HAL_REO1_RING_TP_OFFSET (HAL_REO1_RING_TP - HAL_REO1_RING_HP) + +/* REO2SW0 ring configuration address */ +#define HAL_REO_SW0_RING_BASE_LSB(ab) \ + 
((ab)->hw_params->regs->hal_reo2_sw0_ring_base) + +/* REO2SW0 R2 ring pointer (head/tail) address */ +#define HAL_REO_SW0_RING_HP 0x00003088 + +/* REO CMD R0 address */ +#define HAL_REO_CMD_RING_BASE_LSB(ab) \ + ((ab)->hw_params->regs->hal_reo_cmd_ring_base) + +/* REO CMD R2 address */ +#define HAL_REO_CMD_HP 0x00003020 + +/* SW2REO R0 address */ +#define HAL_SW2REO_RING_BASE_LSB(ab) \ + ((ab)->hw_params->regs->hal_sw2reo_ring_base) +#define HAL_SW2REO1_RING_BASE_LSB(ab) \ + ((ab)->hw_params->regs->hal_sw2reo1_ring_base) + +/* SW2REO R2 address */ +#define HAL_SW2REO_RING_HP 0x00003028 +#define HAL_SW2REO1_RING_HP 0x00003030 + +/* CE ring R0 address */ +#define HAL_CE_SRC_RING_BASE_LSB 0x00000000 +#define HAL_CE_DST_RING_BASE_LSB 0x00000000 +#define HAL_CE_DST_STATUS_RING_BASE_LSB 0x00000058 +#define HAL_CE_DST_RING_CTRL 0x000000b0 + +/* CE ring R2 address */ +#define HAL_CE_DST_RING_HP 0x00000400 +#define HAL_CE_DST_STATUS_RING_HP 0x00000408 + +/* REO status address */ +#define HAL_REO_STATUS_RING_BASE_LSB(ab) \ + ((ab)->hw_params->regs->hal_reo_status_ring_base) +#define HAL_REO_STATUS_HP 0x000030a8 + +/* WBM Idle R0 address */ +#define HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab) \ + ((ab)->hw_params->regs->hal_wbm_idle_ring_base_lsb) +#define HAL_WBM_IDLE_LINK_RING_MISC_ADDR(ab) \ + ((ab)->hw_params->regs->hal_wbm_idle_ring_misc_addr) +#define HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR(ab) \ + ((ab)->hw_params->regs->hal_wbm_r0_idle_list_cntl_addr) +#define HAL_WBM_R0_IDLE_LIST_SIZE_ADDR(ab) \ + ((ab)->hw_params->regs->hal_wbm_r0_idle_list_size_addr) +#define HAL_WBM_SCATTERED_RING_BASE_LSB(ab) \ + ((ab)->hw_params->regs->hal_wbm_scattered_ring_base_lsb) +#define HAL_WBM_SCATTERED_RING_BASE_MSB(ab) \ + ((ab)->hw_params->regs->hal_wbm_scattered_ring_base_msb) +#define HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0(ab) \ + ((ab)->hw_params->regs->hal_wbm_scattered_desc_head_info_ix0) +#define HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1(ab) \ + ((ab)->hw_params->regs->hal_wbm_scattered_desc_head_info_ix1) +#define HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0(ab) \ + ((ab)->hw_params->regs->hal_wbm_scattered_desc_tail_info_ix0) +#define HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1(ab) \ + ((ab)->hw_params->regs->hal_wbm_scattered_desc_tail_info_ix1) +#define HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR(ab) \ + ((ab)->hw_params->regs->hal_wbm_scattered_desc_ptr_hp_addr) + +/* WBM Idle R2 address */ +#define HAL_WBM_IDLE_LINK_RING_HP 0x000030b8 + +/* SW2WBM R0 release address */ +#define HAL_WBM_SW_RELEASE_RING_BASE_LSB(ab) \ + ((ab)->hw_params->regs->hal_wbm_sw_release_ring_base_lsb) +#define HAL_WBM_SW1_RELEASE_RING_BASE_LSB(ab) \ + ((ab)->hw_params->regs->hal_wbm_sw1_release_ring_base_lsb) + +/* SW2WBM R2 release address */ +#define HAL_WBM_SW_RELEASE_RING_HP 0x00003010 +#define HAL_WBM_SW1_RELEASE_RING_HP 0x00003018 + +/* WBM2SW R0 release address */ +#define HAL_WBM0_RELEASE_RING_BASE_LSB(ab) \ + ((ab)->hw_params->regs->hal_wbm0_release_ring_base_lsb) + +#define HAL_WBM1_RELEASE_RING_BASE_LSB(ab) \ + ((ab)->hw_params->regs->hal_wbm1_release_ring_base_lsb) + +/* WBM2SW R2 release address */ +#define HAL_WBM0_RELEASE_RING_HP 0x000030c8 +#define HAL_WBM1_RELEASE_RING_HP 0x000030d0 + +/* WBM cookie config address and mask */ +#define HAL_WBM_SW_COOKIE_CFG0 0x00000040 +#define HAL_WBM_SW_COOKIE_CFG1 0x00000044 +#define HAL_WBM_SW_COOKIE_CFG2 0x00000090 +#define HAL_WBM_SW_COOKIE_CONVERT_CFG 0x00000094 + +#define HAL_WBM_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB GENMASK(7, 0) +#define HAL_WBM_SW_COOKIE_CFG_COOKIE_PPT_MSB GENMASK(12, 8) +#define 
HAL_WBM_SW_COOKIE_CFG_COOKIE_SPT_MSB GENMASK(17, 13)
+#define HAL_WBM_SW_COOKIE_CFG_ALIGN BIT(18)
+#define HAL_WBM_SW_COOKIE_CFG_RELEASE_PATH_EN BIT(0)
+#define HAL_WBM_SW_COOKIE_CFG_ERR_PATH_EN BIT(1)
+#define HAL_WBM_SW_COOKIE_CFG_CONV_IND_EN BIT(3)
+
+#define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW0_EN BIT(1)
+#define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW1_EN BIT(2)
+#define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW2_EN BIT(3)
+#define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW3_EN BIT(4)
+#define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN BIT(5)
+#define HAL_WBM_SW_COOKIE_CONV_CFG_GLOBAL_EN BIT(8)
+
+/* TCL ring field mask and offset */
+#define HAL_TCL1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
+#define HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
+#define HAL_TCL1_RING_ID_ENTRY_SIZE GENMASK(7, 0)
+#define HAL_TCL1_RING_MISC_MSI_RING_ID_DISABLE BIT(0)
+#define HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE BIT(1)
+#define HAL_TCL1_RING_MISC_MSI_SWAP BIT(3)
+#define HAL_TCL1_RING_MISC_HOST_FW_SWAP BIT(4)
+#define HAL_TCL1_RING_MISC_DATA_TLV_SWAP BIT(5)
+#define HAL_TCL1_RING_MISC_SRNG_ENABLE BIT(6)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD GENMASK(31, 16)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD GENMASK(14, 0)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD GENMASK(15, 0)
+#define HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE BIT(8)
+#define HAL_TCL1_RING_MSI1_BASE_MSB_ADDR GENMASK(7, 0)
+#define HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN BIT(23)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP GENMASK(31, 0)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP0 GENMASK(2, 0)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP1 GENMASK(5, 3)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP2 GENMASK(8, 6)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP3 GENMASK(11, 9)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP4 GENMASK(14, 12)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP5 GENMASK(17, 15)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP6 GENMASK(20, 18)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP7 GENMASK(23, 21)
+
+/* REO ring field mask and offset */
+#define HAL_REO1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
+#define HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
+#define HAL_REO1_RING_ID_RING_ID GENMASK(15, 8)
+#define HAL_REO1_RING_ID_ENTRY_SIZE GENMASK(7, 0)
+#define HAL_REO1_RING_MISC_MSI_SWAP BIT(3)
+#define HAL_REO1_RING_MISC_HOST_FW_SWAP BIT(4)
+#define HAL_REO1_RING_MISC_DATA_TLV_SWAP BIT(5)
+#define HAL_REO1_RING_MISC_SRNG_ENABLE BIT(6)
+#define HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD GENMASK(31, 16)
+#define HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD GENMASK(14, 0)
+#define HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE BIT(8)
+#define HAL_REO1_RING_MSI1_BASE_MSB_ADDR GENMASK(7, 0)
+#define HAL_REO1_MISC_CTL_FRAG_DST_RING GENMASK(20, 17)
+#define HAL_REO1_MISC_CTL_BAR_DST_RING GENMASK(24, 21)
+#define HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE BIT(2)
+#define HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE BIT(3)
+#define HAL_REO1_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB GENMASK(7, 0)
+#define HAL_REO1_SW_COOKIE_CFG_COOKIE_PPT_MSB GENMASK(12, 8)
+#define HAL_REO1_SW_COOKIE_CFG_COOKIE_SPT_MSB GENMASK(17, 13)
+#define HAL_REO1_SW_COOKIE_CFG_ALIGN BIT(18)
+#define HAL_REO1_SW_COOKIE_CFG_ENABLE BIT(19)
+#define HAL_REO1_SW_COOKIE_CFG_GLOBAL_ENABLE BIT(20)
+
+/* CE ring bit field mask and shift */
+#define HAL_CE_DST_R0_DEST_CTRL_MAX_LEN GENMASK(15, 0)
+
+#define HAL_ADDR_LSB_REG_MASK 0xffffffff
+
+#define HAL_ADDR_MSB_REG_SHIFT 32
+
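A small stand-alone sketch (not part of this patch) of how the two constants above split a 40-bit DMA address, as ath12k_hal_ce_src_set_desc() and ath12k_hal_set_link_desc_addr() do in hal.c earlier in this patch; the example address is made up:

    #include <stdint.h>
    #include <stdio.h>

    #define HAL_ADDR_LSB_REG_MASK  0xffffffff
    #define HAL_ADDR_MSB_REG_SHIFT 32

    int main(void)
    {
            uint64_t paddr = 0xab12345678ULL;  /* made-up 40-bit address */
            uint32_t lsb = paddr & HAL_ADDR_LSB_REG_MASK;
            uint32_t msb = paddr >> HAL_ADDR_MSB_REG_SHIFT; /* bits 39:32 */

            /* prints: lsb 0x12345678 msb 0xab */
            printf("lsb 0x%08x msb 0x%02x\n", lsb, msb);
            return 0;
    }

+/* WBM ring bit field mask and shift */
+#define HAL_WBM_LINK_DESC_IDLE_LIST_MODE BIT(1)
+#define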
HAL_WBM_SCATTER_BUFFER_SIZE GENMASK(10, 2) +#define HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST GENMASK(31, 16) +#define HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32 GENMASK(7, 0) +#define HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG GENMASK(31, 8) + +#define HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1 GENMASK(20, 8) +#define HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1 GENMASK(20, 8) + +#define HAL_WBM_IDLE_LINK_RING_MISC_SRNG_ENABLE BIT(6) +#define HAL_WBM_IDLE_LINK_RING_MISC_RIND_ID_DISABLE BIT(0) + +#define BASE_ADDR_MATCH_TAG_VAL 0x5 + +#define HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE 0x000fffff +#define HAL_REO_REO2SW0_RING_BASE_MSB_RING_SIZE 0x000fffff +#define HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE 0x0000ffff +#define HAL_REO_CMD_RING_BASE_MSB_RING_SIZE 0x0000ffff +#define HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff +#define HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE 0x000fffff +#define HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE 0x000fffff +#define HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff +#define HAL_CE_SRC_RING_BASE_MSB_RING_SIZE 0x0000ffff +#define HAL_CE_DST_RING_BASE_MSB_RING_SIZE 0x0000ffff +#define HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff +#define HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE 0x000fffff +#define HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE 0x0000ffff +#define HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE 0x000fffff +#define HAL_RXDMA_RING_MAX_SIZE 0x0000ffff +#define HAL_RXDMA_RING_MAX_SIZE_BE 0x000fffff +#define HAL_WBM2PPE_RELEASE_RING_BASE_MSB_RING_SIZE 0x000fffff + +#define HAL_WBM2SW_REL_ERR_RING_NUM 3 +/* Add any other errors here and return them in + * ath12k_hal_rx_desc_get_err(). + */ + +enum hal_srng_ring_id { + HAL_SRNG_RING_ID_REO2SW0 = 0, + HAL_SRNG_RING_ID_REO2SW1, + HAL_SRNG_RING_ID_REO2SW2, + HAL_SRNG_RING_ID_REO2SW3, + HAL_SRNG_RING_ID_REO2SW4, + HAL_SRNG_RING_ID_REO2SW5, + HAL_SRNG_RING_ID_REO2SW6, + HAL_SRNG_RING_ID_REO2SW7, + HAL_SRNG_RING_ID_REO2SW8, + HAL_SRNG_RING_ID_REO2TCL, + HAL_SRNG_RING_ID_REO2PPE, + + HAL_SRNG_RING_ID_SW2REO = 16, + HAL_SRNG_RING_ID_SW2REO1, + HAL_SRNG_RING_ID_SW2REO2, + HAL_SRNG_RING_ID_SW2REO3, + + HAL_SRNG_RING_ID_REO_CMD, + HAL_SRNG_RING_ID_REO_STATUS, + + HAL_SRNG_RING_ID_SW2TCL1 = 24, + HAL_SRNG_RING_ID_SW2TCL2, + HAL_SRNG_RING_ID_SW2TCL3, + HAL_SRNG_RING_ID_SW2TCL4, + HAL_SRNG_RING_ID_SW2TCL5, + HAL_SRNG_RING_ID_SW2TCL6, + HAL_SRNG_RING_ID_PPE2TCL1 = 30, + + HAL_SRNG_RING_ID_SW2TCL_CMD = 40, + HAL_SRNG_RING_ID_SW2TCL1_CMD, + HAL_SRNG_RING_ID_TCL_STATUS, + + HAL_SRNG_RING_ID_CE0_SRC = 64, + HAL_SRNG_RING_ID_CE1_SRC, + HAL_SRNG_RING_ID_CE2_SRC, + HAL_SRNG_RING_ID_CE3_SRC, + HAL_SRNG_RING_ID_CE4_SRC, + HAL_SRNG_RING_ID_CE5_SRC, + HAL_SRNG_RING_ID_CE6_SRC, + HAL_SRNG_RING_ID_CE7_SRC, + HAL_SRNG_RING_ID_CE8_SRC, + HAL_SRNG_RING_ID_CE9_SRC, + HAL_SRNG_RING_ID_CE10_SRC, + HAL_SRNG_RING_ID_CE11_SRC, + HAL_SRNG_RING_ID_CE12_SRC, + HAL_SRNG_RING_ID_CE13_SRC, + HAL_SRNG_RING_ID_CE14_SRC, + HAL_SRNG_RING_ID_CE15_SRC, + + HAL_SRNG_RING_ID_CE0_DST = 81, + HAL_SRNG_RING_ID_CE1_DST, + HAL_SRNG_RING_ID_CE2_DST, + HAL_SRNG_RING_ID_CE3_DST, + HAL_SRNG_RING_ID_CE4_DST, + HAL_SRNG_RING_ID_CE5_DST, + HAL_SRNG_RING_ID_CE6_DST, + HAL_SRNG_RING_ID_CE7_DST, + HAL_SRNG_RING_ID_CE8_DST, + HAL_SRNG_RING_ID_CE9_DST, + HAL_SRNG_RING_ID_CE10_DST, + HAL_SRNG_RING_ID_CE11_DST, + HAL_SRNG_RING_ID_CE12_DST, + HAL_SRNG_RING_ID_CE13_DST, + HAL_SRNG_RING_ID_CE14_DST, + HAL_SRNG_RING_ID_CE15_DST, + + HAL_SRNG_RING_ID_CE0_DST_STATUS = 100, + HAL_SRNG_RING_ID_CE1_DST_STATUS, + HAL_SRNG_RING_ID_CE2_DST_STATUS, + 
HAL_SRNG_RING_ID_CE3_DST_STATUS, + HAL_SRNG_RING_ID_CE4_DST_STATUS, + HAL_SRNG_RING_ID_CE5_DST_STATUS, + HAL_SRNG_RING_ID_CE6_DST_STATUS, + HAL_SRNG_RING_ID_CE7_DST_STATUS, + HAL_SRNG_RING_ID_CE8_DST_STATUS, + HAL_SRNG_RING_ID_CE9_DST_STATUS, + HAL_SRNG_RING_ID_CE10_DST_STATUS, + HAL_SRNG_RING_ID_CE11_DST_STATUS, + HAL_SRNG_RING_ID_CE12_DST_STATUS, + HAL_SRNG_RING_ID_CE13_DST_STATUS, + HAL_SRNG_RING_ID_CE14_DST_STATUS, + HAL_SRNG_RING_ID_CE15_DST_STATUS, + + HAL_SRNG_RING_ID_WBM_IDLE_LINK = 120, + HAL_SRNG_RING_ID_WBM_SW0_RELEASE, + HAL_SRNG_RING_ID_WBM_SW1_RELEASE, + HAL_SRNG_RING_ID_WBM_PPE_RELEASE = 123, + + HAL_SRNG_RING_ID_WBM2SW0_RELEASE = 128, + HAL_SRNG_RING_ID_WBM2SW1_RELEASE, + HAL_SRNG_RING_ID_WBM2SW2_RELEASE, + HAL_SRNG_RING_ID_WBM2SW3_RELEASE, /* RX ERROR RING */ + HAL_SRNG_RING_ID_WBM2SW4_RELEASE, + HAL_SRNG_RING_ID_WBM2SW5_RELEASE, + HAL_SRNG_RING_ID_WBM2SW6_RELEASE, + HAL_SRNG_RING_ID_WBM2SW7_RELEASE, + + HAL_SRNG_RING_ID_UMAC_ID_END = 159, + + /* Common DMAC rings shared by all LMACs */ + HAL_SRNG_RING_ID_DMAC_CMN_ID_START = 160, + HAL_SRNG_SW2RXDMA_BUF0 = HAL_SRNG_RING_ID_DMAC_CMN_ID_START, + HAL_SRNG_SW2RXDMA_BUF1 = 161, + HAL_SRNG_SW2RXDMA_BUF2 = 162, + + HAL_SRNG_SW2RXMON_BUF0 = 168, + + HAL_SRNG_SW2TXMON_BUF0 = 176, + + HAL_SRNG_RING_ID_DMAC_CMN_ID_END = 183, + HAL_SRNG_RING_ID_PMAC1_ID_START = 184, + + HAL_SRNG_RING_ID_WMAC1_SW2RXMON_BUF0 = HAL_SRNG_RING_ID_PMAC1_ID_START, + + HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0, + HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1, + HAL_SRNG_RING_ID_WMAC1_RXMON2SW0 = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1, + HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC, + HAL_SRNG_RING_ID_RXDMA_DIR_BUF, + HAL_SRNG_RING_ID_WMAC1_SW2TXMON_BUF0, + HAL_SRNG_RING_ID_WMAC1_TXMON2SW0_BUF0, + + HAL_SRNG_RING_ID_PMAC1_ID_END, +}; + +/* SRNG registers are split into two groups R0 and R2 */ +#define HAL_SRNG_REG_GRP_R0 0 +#define HAL_SRNG_REG_GRP_R2 1 +#define HAL_SRNG_NUM_REG_GRP 2 + +/* TODO: number of PMACs */ +#define HAL_SRNG_NUM_PMACS 3 +#define HAL_SRNG_NUM_DMAC_RINGS (HAL_SRNG_RING_ID_DMAC_CMN_ID_END - \ + HAL_SRNG_RING_ID_DMAC_CMN_ID_START) +#define HAL_SRNG_RINGS_PER_PMAC (HAL_SRNG_RING_ID_PMAC1_ID_END - \ + HAL_SRNG_RING_ID_PMAC1_ID_START) +#define HAL_SRNG_NUM_PMAC_RINGS (HAL_SRNG_NUM_PMACS * HAL_SRNG_RINGS_PER_PMAC) +#define HAL_SRNG_RING_ID_MAX (HAL_SRNG_RING_ID_DMAC_CMN_ID_END + \ + HAL_SRNG_NUM_PMAC_RINGS) + +enum hal_ring_type { + HAL_REO_DST, + HAL_REO_EXCEPTION, + HAL_REO_REINJECT, + HAL_REO_CMD, + HAL_REO_STATUS, + HAL_TCL_DATA, + HAL_TCL_CMD, + HAL_TCL_STATUS, + HAL_CE_SRC, + HAL_CE_DST, + HAL_CE_DST_STATUS, + HAL_WBM_IDLE_LINK, + HAL_SW2WBM_RELEASE, + HAL_WBM2SW_RELEASE, + HAL_RXDMA_BUF, + HAL_RXDMA_DST, + HAL_RXDMA_MONITOR_BUF, + HAL_RXDMA_MONITOR_STATUS, + HAL_RXDMA_MONITOR_DST, + HAL_RXDMA_MONITOR_DESC, + HAL_RXDMA_DIR_BUF, + HAL_PPE2TCL, + HAL_PPE_RELEASE, + HAL_TX_MONITOR_BUF, + HAL_TX_MONITOR_DST, + HAL_MAX_RING_TYPES, +}; + +#define HAL_RX_MAX_BA_WINDOW 256 + +#define HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_USEC (100 * 1000) +#define HAL_DEFAULT_VO_REO_TIMEOUT_USEC (40 * 1000) + +/** + * enum hal_reo_cmd_type: Enum for REO command type + * @HAL_REO_CMD_GET_QUEUE_STATS: Get REO queue status/stats + * @HAL_REO_CMD_FLUSH_QUEUE: Flush all frames in REO queue + * @HAL_REO_CMD_FLUSH_CACHE: Flush descriptor entries in the cache + * @HAL_REO_CMD_UNBLOCK_CACHE: Unblock a descriptor's address that was blocked + * earlier with a 'REO_FLUSH_CACHE' command + * @HAL_REO_CMD_FLUSH_TIMEOUT_LIST: Flush buffers/descriptors from timeout list + * @HAL_REO_CMD_UPDATE_RX_QUEUE: Update REO 
queue settings + */ +enum hal_reo_cmd_type { + HAL_REO_CMD_GET_QUEUE_STATS = 0, + HAL_REO_CMD_FLUSH_QUEUE = 1, + HAL_REO_CMD_FLUSH_CACHE = 2, + HAL_REO_CMD_UNBLOCK_CACHE = 3, + HAL_REO_CMD_FLUSH_TIMEOUT_LIST = 4, + HAL_REO_CMD_UPDATE_RX_QUEUE = 5, +}; + +/** + * enum hal_reo_cmd_status: Enum for execution status of REO command + * @HAL_REO_CMD_SUCCESS: Command has successfully executed + * @HAL_REO_CMD_BLOCKED: Command could not be executed as the queue + * or cache was blocked + * @HAL_REO_CMD_FAILED: Command execution failed, could be due to + * invalid queue desc + * @HAL_REO_CMD_RESOURCE_BLOCKED: + * @HAL_REO_CMD_DRAIN: + */ +enum hal_reo_cmd_status { + HAL_REO_CMD_SUCCESS = 0, + HAL_REO_CMD_BLOCKED = 1, + HAL_REO_CMD_FAILED = 2, + HAL_REO_CMD_RESOURCE_BLOCKED = 3, + HAL_REO_CMD_DRAIN = 0xff, +}; + +struct hal_wbm_idle_scatter_list { + dma_addr_t paddr; + struct hal_wbm_link_desc *vaddr; +}; + +struct hal_srng_params { + dma_addr_t ring_base_paddr; + u32 *ring_base_vaddr; + int num_entries; + u32 intr_batch_cntr_thres_entries; + u32 intr_timer_thres_us; + u32 flags; + u32 max_buffer_len; + u32 low_threshold; + u32 high_threshold; + dma_addr_t msi_addr; + dma_addr_t msi2_addr; + u32 msi_data; + u32 msi2_data; + + /* Add more params as needed */ +}; + +enum hal_srng_dir { + HAL_SRNG_DIR_SRC, + HAL_SRNG_DIR_DST +}; + +/* srng flags */ +#define HAL_SRNG_FLAGS_MSI_SWAP 0x00000008 +#define HAL_SRNG_FLAGS_RING_PTR_SWAP 0x00000010 +#define HAL_SRNG_FLAGS_DATA_TLV_SWAP 0x00000020 +#define HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN 0x00010000 +#define HAL_SRNG_FLAGS_MSI_INTR 0x00020000 +#define HAL_SRNG_FLAGS_HIGH_THRESH_INTR_EN 0x00080000 +#define HAL_SRNG_FLAGS_LMAC_RING 0x80000000 + +#define HAL_SRNG_TLV_HDR_TAG GENMASK(9, 1) +#define HAL_SRNG_TLV_HDR_LEN GENMASK(25, 10) + +/* Common SRNG ring structure for source and destination rings */ +struct hal_srng { + /* Unique SRNG ring ID */ + u8 ring_id; + + /* Ring initialization done */ + u8 initialized; + + /* Interrupt/MSI value assigned to this ring */ + int irq; + + /* Physical base address of the ring */ + dma_addr_t ring_base_paddr; + + /* Virtual base address of the ring */ + u32 *ring_base_vaddr; + + /* Number of entries in ring */ + u32 num_entries; + + /* Ring size */ + u32 ring_size; + + /* Ring size mask */ + u32 ring_size_mask; + + /* Size of ring entry */ + u32 entry_size; + + /* Interrupt timer threshold - in micro seconds */ + u32 intr_timer_thres_us; + + /* Interrupt batch counter threshold - in number of ring entries */ + u32 intr_batch_cntr_thres_entries; + + /* MSI Address */ + dma_addr_t msi_addr; + + /* MSI data */ + u32 msi_data; + + /* MSI2 Address */ + dma_addr_t msi2_addr; + + /* MSI2 data */ + u32 msi2_data; + + /* Misc flags */ + u32 flags; + + /* Lock for serializing ring index updates */ + spinlock_t lock; + + struct lock_class_key lock_key; + + /* Start offset of SRNG register groups for this ring + * TBD: See if this is required - register address can be derived + * from ring ID + */ + u32 hwreg_base[HAL_SRNG_NUM_REG_GRP]; + + u64 timestamp; + + /* Source or Destination ring */ + enum hal_srng_dir ring_dir; + + union { + struct { + /* SW tail pointer */ + u32 tp; + + /* Shadow head pointer location to be updated by HW */ + volatile u32 *hp_addr; + + /* Cached head pointer */ + u32 cached_hp; + + /* Tail pointer location to be updated by SW - This + * will be a register address and need not be + * accessed through SW structure + */ + u32 *tp_addr; + + /* Current SW loop cnt */ + u32 loop_cnt; + + /* max transfer size */ + 
u16 max_buffer_length;
+
+			/* head pointer at access end */
+			u32 last_hp;
+		} dst_ring;
+
+		struct {
+			/* SW head pointer */
+			u32 hp;
+
+			/* SW reap head pointer */
+			u32 reap_hp;
+
+			/* Shadow tail pointer location to be updated by HW */
+			u32 *tp_addr;
+
+			/* Cached tail pointer */
+			u32 cached_tp;
+
+			/* Head pointer location to be updated by SW - This
+			 * will be a register address and need not be accessed
+			 * through SW structure
+			 */
+			u32 *hp_addr;
+
+			/* Low threshold - in number of ring entries */
+			u32 low_threshold;
+
+			/* tail pointer at access end */
+			u32 last_tp;
+		} src_ring;
+	} u;
+};
+
+/* Interrupt mitigation - Batch threshold in terms of number of frames */
+#define HAL_SRNG_INT_BATCH_THRESHOLD_TX 256
+#define HAL_SRNG_INT_BATCH_THRESHOLD_RX 128
+#define HAL_SRNG_INT_BATCH_THRESHOLD_OTHER 1
+
+/* Interrupt mitigation - timer threshold in us */
+#define HAL_SRNG_INT_TIMER_THRESHOLD_TX 1000
+#define HAL_SRNG_INT_TIMER_THRESHOLD_RX 500
+#define HAL_SRNG_INT_TIMER_THRESHOLD_OTHER 256
+
+enum hal_srng_mac_type {
+	ATH12K_HAL_SRNG_UMAC,
+	ATH12K_HAL_SRNG_DMAC,
+	ATH12K_HAL_SRNG_PMAC
+};
+
+/* HW SRNG configuration table */
+struct hal_srng_config {
+	int start_ring_id;
+	u16 max_rings;
+	u16 entry_size;
+	u32 reg_start[HAL_SRNG_NUM_REG_GRP];
+	u16 reg_size[HAL_SRNG_NUM_REG_GRP];
+	enum hal_srng_mac_type mac_type;
+	enum hal_srng_dir ring_dir;
+	u32 max_size;
+};
+
+/**
+ * enum hal_rx_buf_return_buf_manager
+ *
+ * @HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST: Buffer returned to WBM idle buffer list
+ * @HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST: Descriptor returned to WBM idle
+ * descriptor list, where the chip 0 WBM is chosen in case of a multi-chip config
+ * @HAL_RX_BUF_RBM_WBM_CHIP1_IDLE_DESC_LIST: Descriptor returned to WBM idle
+ * descriptor list, where the chip 1 WBM is chosen in case of a multi-chip config
+ * @HAL_RX_BUF_RBM_WBM_CHIP2_IDLE_DESC_LIST: Descriptor returned to WBM idle
+ * descriptor list, where the chip 2 WBM is chosen in case of a multi-chip config
+ * @HAL_RX_BUF_RBM_FW_BM: Buffer returned to FW
+ * @HAL_RX_BUF_RBM_SW0_BM: For ring 0 -- returned to host
+ * @HAL_RX_BUF_RBM_SW1_BM: For ring 1 -- returned to host
+ * @HAL_RX_BUF_RBM_SW2_BM: For ring 2 -- returned to host
+ * @HAL_RX_BUF_RBM_SW3_BM: For ring 3 -- returned to host
+ * @HAL_RX_BUF_RBM_SW4_BM: For ring 4 -- returned to host
+ * @HAL_RX_BUF_RBM_SW5_BM: For ring 5 -- returned to host
+ * @HAL_RX_BUF_RBM_SW6_BM: For ring 6 -- returned to host
+ */
+
+enum hal_rx_buf_return_buf_manager {
+	HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST,
+	HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST,
+	HAL_RX_BUF_RBM_WBM_CHIP1_IDLE_DESC_LIST,
+	HAL_RX_BUF_RBM_WBM_CHIP2_IDLE_DESC_LIST,
+	HAL_RX_BUF_RBM_FW_BM,
+	HAL_RX_BUF_RBM_SW0_BM,
+	HAL_RX_BUF_RBM_SW1_BM,
+	HAL_RX_BUF_RBM_SW2_BM,
+	HAL_RX_BUF_RBM_SW3_BM,
+	HAL_RX_BUF_RBM_SW4_BM,
+	HAL_RX_BUF_RBM_SW5_BM,
+	HAL_RX_BUF_RBM_SW6_BM,
+};
+
+#define HAL_SRNG_DESC_LOOP_CNT 0xf0000000
+
+#define HAL_REO_CMD_FLG_NEED_STATUS BIT(0)
+#define HAL_REO_CMD_FLG_STATS_CLEAR BIT(1)
+#define HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER BIT(2)
+#define HAL_REO_CMD_FLG_FLUSH_RELEASE_BLOCKING BIT(3)
+#define HAL_REO_CMD_FLG_FLUSH_NO_INVAL BIT(4)
+#define HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS BIT(5)
+#define HAL_REO_CMD_FLG_FLUSH_ALL BIT(6)
+#define HAL_REO_CMD_FLG_UNBLK_RESOURCE BIT(7)
+#define HAL_REO_CMD_FLG_UNBLK_CACHE BIT(8)
+
+/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* fields */
+#define HAL_REO_CMD_UPD0_RX_QUEUE_NUM BIT(8)
+#define HAL_REO_CMD_UPD0_VLD BIT(9)
+#define HAL_REO_CMD_UPD0_ALDC BIT(10)
+#define HAL_REO_CMD_UPD0_DIS_DUP_DETECTION BIT(11)
+#define HAL_REO_CMD_UPD0_SOFT_REORDER_EN BIT(12)
+#define HAL_REO_CMD_UPD0_AC BIT(13)
+#define HAL_REO_CMD_UPD0_BAR BIT(14)
+#define HAL_REO_CMD_UPD0_RETRY BIT(15)
+#define HAL_REO_CMD_UPD0_CHECK_2K_MODE BIT(16)
+#define HAL_REO_CMD_UPD0_OOR_MODE BIT(17)
+#define HAL_REO_CMD_UPD0_BA_WINDOW_SIZE BIT(18)
+#define HAL_REO_CMD_UPD0_PN_CHECK BIT(19)
+#define HAL_REO_CMD_UPD0_EVEN_PN BIT(20)
+#define HAL_REO_CMD_UPD0_UNEVEN_PN BIT(21)
+#define HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE BIT(22)
+#define HAL_REO_CMD_UPD0_PN_SIZE BIT(23)
+#define HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG BIT(24)
+#define HAL_REO_CMD_UPD0_SVLD BIT(25)
+#define HAL_REO_CMD_UPD0_SSN BIT(26)
+#define HAL_REO_CMD_UPD0_SEQ_2K_ERR BIT(27)
+#define HAL_REO_CMD_UPD0_PN_ERR BIT(28)
+#define HAL_REO_CMD_UPD0_PN_VALID BIT(29)
+#define HAL_REO_CMD_UPD0_PN BIT(30)
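
A hedged sketch (not part of this patch) of how the UPD0 select bits are meant to be used: an HAL_REO_CMD_UPDATE_RX_QUEUE command only touches the queue fields whose UPD0 bit is set, with the new values carried in the rest of struct ath12k_hal_reo_cmd (declared just below); the helper name here is hypothetical:

    /* hypothetical helper: ask REO to resize only the BA window of an
     * RX queue, leaving every other queue field untouched
     */
    static void sketch_reo_cmd_resize_ba(struct ath12k_hal_reo_cmd *cmd,
                                         u16 ba_window_size)
    {
            memset(cmd, 0, sizeof(*cmd));
            cmd->flag = HAL_REO_CMD_FLG_NEED_STATUS;
            cmd->upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
            cmd->ba_window_size = ba_window_size;
    }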
+
+/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO1_* fields */
+#define HAL_REO_CMD_UPD1_VLD BIT(16)
+#define HAL_REO_CMD_UPD1_ALDC GENMASK(18, 17)
+#define HAL_REO_CMD_UPD1_DIS_DUP_DETECTION BIT(19)
+#define HAL_REO_CMD_UPD1_SOFT_REORDER_EN BIT(20)
+#define HAL_REO_CMD_UPD1_AC GENMASK(22, 21)
+#define HAL_REO_CMD_UPD1_BAR BIT(23)
+#define HAL_REO_CMD_UPD1_RETRY BIT(24)
+#define HAL_REO_CMD_UPD1_CHECK_2K_MODE BIT(25)
+#define HAL_REO_CMD_UPD1_OOR_MODE BIT(26)
+#define HAL_REO_CMD_UPD1_PN_CHECK BIT(27)
+#define HAL_REO_CMD_UPD1_EVEN_PN BIT(28)
+#define HAL_REO_CMD_UPD1_UNEVEN_PN BIT(29)
+#define HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE BIT(30)
+#define HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG BIT(31)
+
+/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO2_* fields */
+#define HAL_REO_CMD_UPD2_SVLD BIT(10)
+#define HAL_REO_CMD_UPD2_SSN GENMASK(22, 11)
+#define HAL_REO_CMD_UPD2_SEQ_2K_ERR BIT(23)
+#define HAL_REO_CMD_UPD2_PN_ERR BIT(24)
+
+struct ath12k_hal_reo_cmd {
+	u32 addr_lo;
+	u32 flag;
+	u32 upd0;
+	u32 upd1;
+	u32 upd2;
+	u32 pn[4];
+	u16 rx_queue_num;
+	u16 min_rel;
+	u16 min_fwd;
+	u8 addr_hi;
+	u8 ac_list;
+	u8 blocking_idx;
+	u16 ba_window_size;
+	u8 pn_size;
+};
+
+enum hal_pn_type {
+	HAL_PN_TYPE_NONE,
+	HAL_PN_TYPE_WPA,
+	HAL_PN_TYPE_WAPI_EVEN,
+	HAL_PN_TYPE_WAPI_UNEVEN,
+};
+
+enum hal_ce_desc {
+	HAL_CE_DESC_SRC,
+	HAL_CE_DESC_DST,
+	HAL_CE_DESC_DST_STATUS,
+};
+
+#define HAL_HASH_ROUTING_RING_TCL 0
+#define HAL_HASH_ROUTING_RING_SW1 1
+#define HAL_HASH_ROUTING_RING_SW2 2
+#define HAL_HASH_ROUTING_RING_SW3 3
+#define HAL_HASH_ROUTING_RING_SW4 4
+#define HAL_HASH_ROUTING_RING_REL 5
+#define HAL_HASH_ROUTING_RING_FW 6
+
+struct hal_reo_status_header {
+	u16 cmd_num;
+	enum hal_reo_cmd_status cmd_status;
+	u16 cmd_exe_time;
+	u32 timestamp;
+};
+
+struct hal_reo_status_queue_stats {
+	u16 ssn;
+	u16 curr_idx;
+	u32 pn[4];
+	u32 last_rx_queue_ts;
+	u32 last_rx_dequeue_ts;
+	u32 rx_bitmap[8]; /* Bitmap from 0-255 */
+	u32 curr_mpdu_cnt;
+	u32 curr_msdu_cnt;
+	u16 fwd_due_to_bar_cnt;
+	u16 dup_cnt;
+	u32 frames_in_order_cnt;
+	u32 num_mpdu_processed_cnt;
+	u32 num_msdu_processed_cnt;
+	u32 total_num_processed_byte_cnt;
+	u32 late_rx_mpdu_cnt;
+	u32 reorder_hole_cnt;
+	u8 timeout_cnt;
+	u8 bar_rx_cnt;
+	u8 num_window_2k_jump_cnt;
+};
+
+struct hal_reo_status_flush_queue {
+	bool err_detected;
+};
+
+enum hal_reo_status_flush_cache_err_code {
+	HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_SUCCESS,
+	HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_IN_USE,
+	HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_NOT_FOUND,
+};
+
+struct hal_reo_status_flush_cache {
+	bool err_detected;
+	enum hal_reo_status_flush_cache_err_code err_code;
+	bool
cache_controller_flush_status_hit; + u8 cache_controller_flush_status_desc_type; + u8 cache_controller_flush_status_client_id; + u8 cache_controller_flush_status_err; + u8 cache_controller_flush_status_cnt; +}; + +enum hal_reo_status_unblock_cache_type { + HAL_REO_STATUS_UNBLOCK_BLOCKING_RESOURCE, + HAL_REO_STATUS_UNBLOCK_ENTIRE_CACHE_USAGE, +}; + +struct hal_reo_status_unblock_cache { + bool err_detected; + enum hal_reo_status_unblock_cache_type unblock_type; +}; + +struct hal_reo_status_flush_timeout_list { + bool err_detected; + bool list_empty; + u16 release_desc_cnt; + u16 fwd_buf_cnt; +}; + +enum hal_reo_threshold_idx { + HAL_REO_THRESHOLD_IDX_DESC_COUNTER0, + HAL_REO_THRESHOLD_IDX_DESC_COUNTER1, + HAL_REO_THRESHOLD_IDX_DESC_COUNTER2, + HAL_REO_THRESHOLD_IDX_DESC_COUNTER_SUM, +}; + +struct hal_reo_status_desc_thresh_reached { + enum hal_reo_threshold_idx threshold_idx; + u32 link_desc_counter0; + u32 link_desc_counter1; + u32 link_desc_counter2; + u32 link_desc_counter_sum; +}; + +struct hal_reo_status { + struct hal_reo_status_header uniform_hdr; + u8 loop_cnt; + union { + struct hal_reo_status_queue_stats queue_stats; + struct hal_reo_status_flush_queue flush_queue; + struct hal_reo_status_flush_cache flush_cache; + struct hal_reo_status_unblock_cache unblock_cache; + struct hal_reo_status_flush_timeout_list timeout_list; + struct hal_reo_status_desc_thresh_reached desc_thresh_reached; + } u; +}; + +/* HAL context to be used to access SRNG APIs (currently used by data path + * and transport (CE) modules) + */ +struct ath12k_hal { + /* HAL internal state for all SRNG rings. + */ + struct hal_srng srng_list[HAL_SRNG_RING_ID_MAX]; + + /* SRNG configuration table */ + struct hal_srng_config *srng_config; + + /* Remote pointer memory for HW/FW updates */ + struct { + u32 *vaddr; + dma_addr_t paddr; + } rdp; + + /* Shared memory for ring pointer updates from host to FW */ + struct { + u32 *vaddr; + dma_addr_t paddr; + } wrp; + + /* Available REO blocking resources bitmap */ + u8 avail_blk_resource; + + u8 current_blk_index; + + /* shadow register configuration */ + u32 shadow_reg_addr[HAL_SHADOW_NUM_REGS]; + int num_shadow_reg_configured; +}; + +/* Maps WBM ring number and Return Buffer Manager Id per TCL ring */ +struct ath12k_hal_tcl_to_wbm_rbm_map { + u8 wbm_ring_num; + u8 rbm_id; +}; + +struct hal_ops { + bool (*rx_desc_get_first_msdu)(struct hal_rx_desc *desc); + bool (*rx_desc_get_last_msdu)(struct hal_rx_desc *desc); + u8 (*rx_desc_get_l3_pad_bytes)(struct hal_rx_desc *desc); + u8 *(*rx_desc_get_hdr_status)(struct hal_rx_desc *desc); + bool (*rx_desc_encrypt_valid)(struct hal_rx_desc *desc); + u32 (*rx_desc_get_encrypt_type)(struct hal_rx_desc *desc); + u8 (*rx_desc_get_decap_type)(struct hal_rx_desc *desc); + u8 (*rx_desc_get_mesh_ctl)(struct hal_rx_desc *desc); + bool (*rx_desc_get_mpdu_seq_ctl_vld)(struct hal_rx_desc *desc); + bool (*rx_desc_get_mpdu_fc_valid)(struct hal_rx_desc *desc); + u16 (*rx_desc_get_mpdu_start_seq_no)(struct hal_rx_desc *desc); + u16 (*rx_desc_get_msdu_len)(struct hal_rx_desc *desc); + u8 (*rx_desc_get_msdu_sgi)(struct hal_rx_desc *desc); + u8 (*rx_desc_get_msdu_rate_mcs)(struct hal_rx_desc *desc); + u8 (*rx_desc_get_msdu_rx_bw)(struct hal_rx_desc *desc); + u32 (*rx_desc_get_msdu_freq)(struct hal_rx_desc *desc); + u8 (*rx_desc_get_msdu_pkt_type)(struct hal_rx_desc *desc); + u8 (*rx_desc_get_msdu_nss)(struct hal_rx_desc *desc); + u8 (*rx_desc_get_mpdu_tid)(struct hal_rx_desc *desc); + u16 (*rx_desc_get_mpdu_peer_id)(struct hal_rx_desc *desc); + void 
(*rx_desc_copy_end_tlv)(struct hal_rx_desc *fdesc, + struct hal_rx_desc *ldesc); + u32 (*rx_desc_get_mpdu_start_tag)(struct hal_rx_desc *desc); + u32 (*rx_desc_get_mpdu_ppdu_id)(struct hal_rx_desc *desc); + void (*rx_desc_set_msdu_len)(struct hal_rx_desc *desc, u16 len); + struct rx_attention *(*rx_desc_get_attention)(struct hal_rx_desc *desc); + u8 *(*rx_desc_get_msdu_payload)(struct hal_rx_desc *desc); + u32 (*rx_desc_get_mpdu_start_offset)(void); + u32 (*rx_desc_get_msdu_end_offset)(void); + bool (*rx_desc_mac_addr2_valid)(struct hal_rx_desc *desc); + u8* (*rx_desc_mpdu_start_addr2)(struct hal_rx_desc *desc); + bool (*rx_desc_is_mcbc)(struct hal_rx_desc *desc); + void (*rx_desc_get_dot11_hdr)(struct hal_rx_desc *desc, + struct ieee80211_hdr *hdr); + u16 (*rx_desc_get_mpdu_frame_ctl)(struct hal_rx_desc *desc); + void (*rx_desc_get_crypto_header)(struct hal_rx_desc *desc, + u8 *crypto_hdr, + enum hal_encrypt_type enctype); + int (*create_srng_config)(struct ath12k_base *ab); + bool (*dp_rx_h_msdu_done)(struct hal_rx_desc *desc); + bool (*dp_rx_h_l4_cksum_fail)(struct hal_rx_desc *desc); + bool (*dp_rx_h_ip_cksum_fail)(struct hal_rx_desc *desc); + bool (*dp_rx_h_is_decrypted)(struct hal_rx_desc *desc); + u32 (*dp_rx_h_mpdu_err)(struct hal_rx_desc *desc); + const struct ath12k_hal_tcl_to_wbm_rbm_map *tcl_to_wbm_rbm_map; +}; + +extern const struct hal_ops hal_qcn9274_ops; +extern const struct hal_ops hal_wcn7850_ops; + +u32 ath12k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid); +void ath12k_hal_reo_qdesc_setup(struct hal_rx_reo_queue *qdesc, + int tid, u32 ba_window_size, + u32 start_seq, enum hal_pn_type type); +void ath12k_hal_reo_init_cmd_ring(struct ath12k_base *ab, + struct hal_srng *srng); +void ath12k_hal_reo_hw_setup(struct ath12k_base *ab, u32 ring_hash_map); +void ath12k_hal_setup_link_idle_list(struct ath12k_base *ab, + struct hal_wbm_idle_scatter_list *sbuf, + u32 nsbufs, u32 tot_link_desc, + u32 end_offset); + +dma_addr_t ath12k_hal_srng_get_tp_addr(struct ath12k_base *ab, + struct hal_srng *srng); +dma_addr_t ath12k_hal_srng_get_hp_addr(struct ath12k_base *ab, + struct hal_srng *srng); +void ath12k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie, + dma_addr_t paddr); +u32 ath12k_hal_ce_get_desc_size(enum hal_ce_desc type); +void ath12k_hal_ce_src_set_desc(struct hal_ce_srng_src_desc *desc, dma_addr_t paddr, + u32 len, u32 id, u8 byte_swap_data); +void ath12k_hal_ce_dst_set_desc(struct hal_ce_srng_dest_desc *desc, dma_addr_t paddr); +u32 ath12k_hal_ce_dst_status_get_length(struct hal_ce_srng_dst_status_desc *desc); +int ath12k_hal_srng_get_entrysize(struct ath12k_base *ab, u32 ring_type); +int ath12k_hal_srng_get_max_entries(struct ath12k_base *ab, u32 ring_type); +void ath12k_hal_srng_get_params(struct ath12k_base *ab, struct hal_srng *srng, + struct hal_srng_params *params); +void *ath12k_hal_srng_dst_get_next_entry(struct ath12k_base *ab, + struct hal_srng *srng); +void *ath12k_hal_srng_dst_peek(struct ath12k_base *ab, struct hal_srng *srng); +int ath12k_hal_srng_dst_num_free(struct ath12k_base *ab, struct hal_srng *srng, + bool sync_hw_ptr); +void *ath12k_hal_srng_src_get_next_reaped(struct ath12k_base *ab, + struct hal_srng *srng); +void *ath12k_hal_srng_src_reap_next(struct ath12k_base *ab, + struct hal_srng *srng); +void *ath12k_hal_srng_src_get_next_entry(struct ath12k_base *ab, + struct hal_srng *srng); +int ath12k_hal_srng_src_num_free(struct ath12k_base *ab, struct hal_srng *srng, + bool sync_hw_ptr); +void ath12k_hal_srng_access_begin(struct 
ath12k_base *ab,
+ struct hal_srng *srng);
+void ath12k_hal_srng_access_end(struct ath12k_base *ab, struct hal_srng *srng);
+int ath12k_hal_srng_setup(struct ath12k_base *ab, enum hal_ring_type type,
+ int ring_num, int mac_id,
+ struct hal_srng_params *params);
+int ath12k_hal_srng_init(struct ath12k_base *ath12k);
+void ath12k_hal_srng_deinit(struct ath12k_base *ath12k);
+void ath12k_hal_dump_srng_stats(struct ath12k_base *ab);
+void ath12k_hal_srng_get_shadow_config(struct ath12k_base *ab,
+ u32 **cfg, u32 *len);
+int ath12k_hal_srng_update_shadow_config(struct ath12k_base *ab,
+ enum hal_ring_type ring_type,
+ int ring_num);
+void ath12k_hal_srng_shadow_config(struct ath12k_base *ab);
+void ath12k_hal_srng_shadow_update_hp_tp(struct ath12k_base *ab,
+ struct hal_srng *srng);
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h new file mode 100644 index 000000000000..2250ca2d19a3 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/hal_desc.h @@ -0,0 +1,2961 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include "core.h"
+
+#ifndef ATH12K_HAL_DESC_H
+#define ATH12K_HAL_DESC_H
+
+#define BUFFER_ADDR_INFO0_ADDR GENMASK(31, 0)
+
+#define BUFFER_ADDR_INFO1_ADDR GENMASK(7, 0)
+#define BUFFER_ADDR_INFO1_RET_BUF_MGR GENMASK(11, 8)
+#define BUFFER_ADDR_INFO1_SW_COOKIE GENMASK(31, 12)
+
+struct ath12k_buffer_addr {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+/* ath12k_buffer_addr
+ *
+ * buffer_addr_31_0
+ * Address (lower 32 bits) of the MSDU buffer or MSDU_EXTENSION
+ * descriptor or Link descriptor
+ *
+ * buffer_addr_39_32
+ * Address (upper 8 bits) of the MSDU buffer or MSDU_EXTENSION
+ * descriptor or Link descriptor
+ *
+ * return_buffer_manager (RBM)
+ * Consumer: WBM
+ * Producer: SW/FW
+ * Indicates to which buffer manager the buffer or MSDU_EXTENSION
+ * descriptor or link descriptor that is being pointed to shall be
+ * returned after the frame has been processed. It is used by WBM
+ * for routing purposes.
+ *
+ * Values are defined in enum %HAL_RX_BUF_RBM_
+ *
+ * sw_buffer_cookie
+ * Cookie field exclusively used by SW. HW ignores the contents,
+ * except that it passes the programmed value on to other
+ * descriptors together with the physical address.
+ *
+ * Field can be used by SW, for example, to associate the buffer's
+ * physical address with the virtual address.
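[Editor's note — an illustrative sketch, not part of the patch. Given the BUFFER_ADDR_INFO0/1 field masks above, SW could fill an ath12k_buffer_addr for a 40-bit DMA address with the kernel's <linux/bitfield.h> helpers; paddr, cookie and rbm are hypothetical locals:

	binfo->info0 = le32_encode_bits(lower_32_bits(paddr),
					BUFFER_ADDR_INFO0_ADDR);
	binfo->info1 = le32_encode_bits(upper_32_bits(paddr),
					BUFFER_ADDR_INFO1_ADDR) |
		       le32_encode_bits(rbm, BUFFER_ADDR_INFO1_RET_BUF_MGR) |
		       le32_encode_bits(cookie, BUFFER_ADDR_INFO1_SW_COOKIE);

The rbm value would come from enum hal_rx_buf_return_buf_manager above, and the cookie is whatever 20-bit tag SW wants echoed back, e.g. a buffer index.]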
+ * + * NOTE1: + * The three most significant bits can have a special meaning + * in case this struct is embedded in a TX_MPDU_DETAILS STRUCT, + * and field transmit_bw_restriction is set + * + * In case of NON punctured transmission: + * Sw_buffer_cookie[19:17] = 3'b000: 20 MHz TX only + * Sw_buffer_cookie[19:17] = 3'b001: 40 MHz TX only + * Sw_buffer_cookie[19:17] = 3'b010: 80 MHz TX only + * Sw_buffer_cookie[19:17] = 3'b011: 160 MHz TX only + * Sw_buffer_cookie[19:17] = 3'b101: 240 MHz TX only + * Sw_buffer_cookie[19:17] = 3'b100: 320 MHz TX only + * Sw_buffer_cookie[19:18] = 2'b11: reserved + * + * In case of punctured transmission: + * Sw_buffer_cookie[19:16] = 4'b0000: pattern 0 only + * Sw_buffer_cookie[19:16] = 4'b0001: pattern 1 only + * Sw_buffer_cookie[19:16] = 4'b0010: pattern 2 only + * Sw_buffer_cookie[19:16] = 4'b0011: pattern 3 only + * Sw_buffer_cookie[19:16] = 4'b0100: pattern 4 only + * Sw_buffer_cookie[19:16] = 4'b0101: pattern 5 only + * Sw_buffer_cookie[19:16] = 4'b0110: pattern 6 only + * Sw_buffer_cookie[19:16] = 4'b0111: pattern 7 only + * Sw_buffer_cookie[19:16] = 4'b1000: pattern 8 only + * Sw_buffer_cookie[19:16] = 4'b1001: pattern 9 only + * Sw_buffer_cookie[19:16] = 4'b1010: pattern 10 only + * Sw_buffer_cookie[19:16] = 4'b1011: pattern 11 only + * Sw_buffer_cookie[19:18] = 2'b11: reserved + * + * Note: a punctured transmission is indicated by the presence + * of TLV TX_PUNCTURE_SETUP embedded in the scheduler TLV + * + * Sw_buffer_cookie[20:17]: Tid: The TID field in the QoS control + * field + * + * Sw_buffer_cookie[16]: Mpdu_qos_control_valid: This field + * indicates MPDUs with a QoS control field. + * + */ + +enum hal_tlv_tag { + HAL_MACTX_CBF_START = 0 /* 0x0 */, + HAL_PHYRX_DATA = 1 /* 0x1 */, + HAL_PHYRX_CBF_DATA_RESP = 2 /* 0x2 */, + HAL_PHYRX_ABORT_REQUEST = 3 /* 0x3 */, + HAL_PHYRX_USER_ABORT_NOTIFICATION = 4 /* 0x4 */, + HAL_MACTX_DATA_RESP = 5 /* 0x5 */, + HAL_MACTX_CBF_DATA = 6 /* 0x6 */, + HAL_MACTX_CBF_DONE = 7 /* 0x7 */, + HAL_PHYRX_LMR_DATA_RESP = 8 /* 0x8 */, + HAL_RXPCU_TO_UCODE_START = 9 /* 0x9 */, + HAL_RXPCU_TO_UCODE_DELIMITER_FOR_FULL_MPDU = 10 /* 0xa */, + HAL_RXPCU_TO_UCODE_FULL_MPDU_DATA = 11 /* 0xb */, + HAL_RXPCU_TO_UCODE_FCS_STATUS = 12 /* 0xc */, + HAL_RXPCU_TO_UCODE_MPDU_DELIMITER = 13 /* 0xd */, + HAL_RXPCU_TO_UCODE_DELIMITER_FOR_MPDU_HEADER = 14 /* 0xe */, + HAL_RXPCU_TO_UCODE_MPDU_HEADER_DATA = 15 /* 0xf */, + HAL_RXPCU_TO_UCODE_END = 16 /* 0x10 */, + HAL_MACRX_CBF_READ_REQUEST = 32 /* 0x20 */, + HAL_MACRX_CBF_DATA_REQUEST = 33 /* 0x21 */, + HAL_MACRXXPECT_NDP_RECEPTION = 34 /* 0x22 */, + HAL_MACRX_FREEZE_CAPTURE_CHANNEL = 35 /* 0x23 */, + HAL_MACRX_NDP_TIMEOUT = 36 /* 0x24 */, + HAL_MACRX_ABORT_ACK = 37 /* 0x25 */, + HAL_MACRX_REQ_IMPLICIT_FB = 38 /* 0x26 */, + HAL_MACRX_CHAIN_MASK = 39 /* 0x27 */, + HAL_MACRX_NAP_USER = 40 /* 0x28 */, + HAL_MACRX_ABORT_REQUEST = 41 /* 0x29 */, + HAL_PHYTX_OTHER_TRANSMIT_INFO16 = 42 /* 0x2a */, + HAL_PHYTX_ABORT_ACK = 43 /* 0x2b */, + HAL_PHYTX_ABORT_REQUEST = 44 /* 0x2c */, + HAL_PHYTX_PKT_END = 45 /* 0x2d */, + HAL_PHYTX_PPDU_HEADER_INFO_REQUEST = 46 /* 0x2e */, + HAL_PHYTX_REQUEST_CTRL_INFO = 47 /* 0x2f */, + HAL_PHYTX_DATA_REQUEST = 48 /* 0x30 */, + HAL_PHYTX_BF_CV_LOADING_DONE = 49 /* 0x31 */, + HAL_PHYTX_NAP_ACK = 50 /* 0x32 */, + HAL_PHYTX_NAP_DONE = 51 /* 0x33 */, + HAL_PHYTX_OFF_ACK = 52 /* 0x34 */, + HAL_PHYTX_ON_ACK = 53 /* 0x35 */, + HAL_PHYTX_SYNTH_OFF_ACK = 54 /* 0x36 */, + HAL_PHYTX_DEBUG16 = 55 /* 0x37 */, + HAL_MACTX_ABORT_REQUEST = 56 /* 0x38 */, + HAL_MACTX_ABORT_ACK = 57 
/* 0x39 */, + HAL_MACTX_PKT_END = 58 /* 0x3a */, + HAL_MACTX_PRE_PHY_DESC = 59 /* 0x3b */, + HAL_MACTX_BF_PARAMS_COMMON = 60 /* 0x3c */, + HAL_MACTX_BF_PARAMS_PER_USER = 61 /* 0x3d */, + HAL_MACTX_PREFETCH_CV = 62 /* 0x3e */, + HAL_MACTX_USER_DESC_COMMON = 63 /* 0x3f */, + HAL_MACTX_USER_DESC_PER_USER = 64 /* 0x40 */, + HAL_XAMPLE_USER_TLV_16 = 65 /* 0x41 */, + HAL_XAMPLE_TLV_16 = 66 /* 0x42 */, + HAL_MACTX_PHY_OFF = 67 /* 0x43 */, + HAL_MACTX_PHY_ON = 68 /* 0x44 */, + HAL_MACTX_SYNTH_OFF = 69 /* 0x45 */, + HAL_MACTXXPECT_CBF_COMMON = 70 /* 0x46 */, + HAL_MACTXXPECT_CBF_PER_USER = 71 /* 0x47 */, + HAL_MACTX_PHY_DESC = 72 /* 0x48 */, + HAL_MACTX_L_SIG_A = 73 /* 0x49 */, + HAL_MACTX_L_SIG_B = 74 /* 0x4a */, + HAL_MACTX_HT_SIG = 75 /* 0x4b */, + HAL_MACTX_VHT_SIG_A = 76 /* 0x4c */, + HAL_MACTX_VHT_SIG_B_SU20 = 77 /* 0x4d */, + HAL_MACTX_VHT_SIG_B_SU40 = 78 /* 0x4e */, + HAL_MACTX_VHT_SIG_B_SU80 = 79 /* 0x4f */, + HAL_MACTX_VHT_SIG_B_SU160 = 80 /* 0x50 */, + HAL_MACTX_VHT_SIG_B_MU20 = 81 /* 0x51 */, + HAL_MACTX_VHT_SIG_B_MU40 = 82 /* 0x52 */, + HAL_MACTX_VHT_SIG_B_MU80 = 83 /* 0x53 */, + HAL_MACTX_VHT_SIG_B_MU160 = 84 /* 0x54 */, + HAL_MACTX_SERVICE = 85 /* 0x55 */, + HAL_MACTX_HE_SIG_A_SU = 86 /* 0x56 */, + HAL_MACTX_HE_SIG_A_MU_DL = 87 /* 0x57 */, + HAL_MACTX_HE_SIG_A_MU_UL = 88 /* 0x58 */, + HAL_MACTX_HE_SIG_B1_MU = 89 /* 0x59 */, + HAL_MACTX_HE_SIG_B2_MU = 90 /* 0x5a */, + HAL_MACTX_HE_SIG_B2_OFDMA = 91 /* 0x5b */, + HAL_MACTX_DELETE_CV = 92 /* 0x5c */, + HAL_MACTX_MU_UPLINK_COMMON = 93 /* 0x5d */, + HAL_MACTX_MU_UPLINK_USER_SETUP = 94 /* 0x5e */, + HAL_MACTX_OTHER_TRANSMIT_INFO = 95 /* 0x5f */, + HAL_MACTX_PHY_NAP = 96 /* 0x60 */, + HAL_MACTX_DEBUG = 97 /* 0x61 */, + HAL_PHYRX_ABORT_ACK = 98 /* 0x62 */, + HAL_PHYRX_GENERATED_CBF_DETAILS = 99 /* 0x63 */, + HAL_PHYRX_RSSI_LEGACY = 100 /* 0x64 */, + HAL_PHYRX_RSSI_HT = 101 /* 0x65 */, + HAL_PHYRX_USER_INFO = 102 /* 0x66 */, + HAL_PHYRX_PKT_END = 103 /* 0x67 */, + HAL_PHYRX_DEBUG = 104 /* 0x68 */, + HAL_PHYRX_CBF_TRANSFER_DONE = 105 /* 0x69 */, + HAL_PHYRX_CBF_TRANSFER_ABORT = 106 /* 0x6a */, + HAL_PHYRX_L_SIG_A = 107 /* 0x6b */, + HAL_PHYRX_L_SIG_B = 108 /* 0x6c */, + HAL_PHYRX_HT_SIG = 109 /* 0x6d */, + HAL_PHYRX_VHT_SIG_A = 110 /* 0x6e */, + HAL_PHYRX_VHT_SIG_B_SU20 = 111 /* 0x6f */, + HAL_PHYRX_VHT_SIG_B_SU40 = 112 /* 0x70 */, + HAL_PHYRX_VHT_SIG_B_SU80 = 113 /* 0x71 */, + HAL_PHYRX_VHT_SIG_B_SU160 = 114 /* 0x72 */, + HAL_PHYRX_VHT_SIG_B_MU20 = 115 /* 0x73 */, + HAL_PHYRX_VHT_SIG_B_MU40 = 116 /* 0x74 */, + HAL_PHYRX_VHT_SIG_B_MU80 = 117 /* 0x75 */, + HAL_PHYRX_VHT_SIG_B_MU160 = 118 /* 0x76 */, + HAL_PHYRX_HE_SIG_A_SU = 119 /* 0x77 */, + HAL_PHYRX_HE_SIG_A_MU_DL = 120 /* 0x78 */, + HAL_PHYRX_HE_SIG_A_MU_UL = 121 /* 0x79 */, + HAL_PHYRX_HE_SIG_B1_MU = 122 /* 0x7a */, + HAL_PHYRX_HE_SIG_B2_MU = 123 /* 0x7b */, + HAL_PHYRX_HE_SIG_B2_OFDMA = 124 /* 0x7c */, + HAL_PHYRX_OTHER_RECEIVE_INFO = 125 /* 0x7d */, + HAL_PHYRX_COMMON_USER_INFO = 126 /* 0x7e */, + HAL_PHYRX_DATA_DONE = 127 /* 0x7f */, + HAL_COEX_TX_REQ = 128 /* 0x80 */, + HAL_DUMMY = 129 /* 0x81 */, + HALXAMPLE_TLV_32_NAME = 130 /* 0x82 */, + HAL_MPDU_LIMIT = 131 /* 0x83 */, + HAL_NA_LENGTH_END = 132 /* 0x84 */, + HAL_OLE_BUF_STATUS = 133 /* 0x85 */, + HAL_PCU_PPDU_SETUP_DONE = 134 /* 0x86 */, + HAL_PCU_PPDU_SETUP_END = 135 /* 0x87 */, + HAL_PCU_PPDU_SETUP_INIT = 136 /* 0x88 */, + HAL_PCU_PPDU_SETUP_START = 137 /* 0x89 */, + HAL_PDG_FES_SETUP = 138 /* 0x8a */, + HAL_PDG_RESPONSE = 139 /* 0x8b */, + HAL_PDG_TX_REQ = 140 /* 0x8c */, + HAL_SCH_WAIT_INSTR = 141 /* 0x8d */, + 
HAL_TQM_FLOWMPTY_STATUS = 143 /* 0x8f */, + HAL_TQM_FLOW_NOTMPTY_STATUS = 144 /* 0x90 */, + HAL_TQM_GEN_MPDU_LENGTH_LIST = 145 /* 0x91 */, + HAL_TQM_GEN_MPDU_LENGTH_LIST_STATUS = 146 /* 0x92 */, + HAL_TQM_GEN_MPDUS = 147 /* 0x93 */, + HAL_TQM_GEN_MPDUS_STATUS = 148 /* 0x94 */, + HAL_TQM_REMOVE_MPDU = 149 /* 0x95 */, + HAL_TQM_REMOVE_MPDU_STATUS = 150 /* 0x96 */, + HAL_TQM_REMOVE_MSDU = 151 /* 0x97 */, + HAL_TQM_REMOVE_MSDU_STATUS = 152 /* 0x98 */, + HAL_TQM_UPDATE_TX_MPDU_COUNT = 153 /* 0x99 */, + HAL_TQM_WRITE_CMD = 154 /* 0x9a */, + HAL_OFDMA_TRIGGER_DETAILS = 155 /* 0x9b */, + HAL_TX_DATA = 156 /* 0x9c */, + HAL_TX_FES_SETUP = 157 /* 0x9d */, + HAL_RX_PACKET = 158 /* 0x9e */, + HALXPECTED_RESPONSE = 159 /* 0x9f */, + HAL_TX_MPDU_END = 160 /* 0xa0 */, + HAL_TX_MPDU_START = 161 /* 0xa1 */, + HAL_TX_MSDU_END = 162 /* 0xa2 */, + HAL_TX_MSDU_START = 163 /* 0xa3 */, + HAL_TX_SW_MODE_SETUP = 164 /* 0xa4 */, + HAL_TXPCU_BUFFER_STATUS = 165 /* 0xa5 */, + HAL_TXPCU_USER_BUFFER_STATUS = 166 /* 0xa6 */, + HAL_DATA_TO_TIME_CONFIG = 167 /* 0xa7 */, + HALXAMPLE_USER_TLV_32 = 168 /* 0xa8 */, + HAL_MPDU_INFO = 169 /* 0xa9 */, + HAL_PDG_USER_SETUP = 170 /* 0xaa */, + HAL_TX_11AH_SETUP = 171 /* 0xab */, + HAL_REO_UPDATE_RX_REO_QUEUE_STATUS = 172 /* 0xac */, + HAL_TX_PEER_ENTRY = 173 /* 0xad */, + HAL_TX_RAW_OR_NATIVE_FRAME_SETUP = 174 /* 0xae */, + HALXAMPLE_USER_TLV_44 = 175 /* 0xaf */, + HAL_TX_FLUSH = 176 /* 0xb0 */, + HAL_TX_FLUSH_REQ = 177 /* 0xb1 */, + HAL_TQM_WRITE_CMD_STATUS = 178 /* 0xb2 */, + HAL_TQM_GET_MPDU_QUEUE_STATS = 179 /* 0xb3 */, + HAL_TQM_GET_MSDU_FLOW_STATS = 180 /* 0xb4 */, + HALXAMPLE_USER_CTLV_44 = 181 /* 0xb5 */, + HAL_TX_FES_STATUS_START = 182 /* 0xb6 */, + HAL_TX_FES_STATUS_USER_PPDU = 183 /* 0xb7 */, + HAL_TX_FES_STATUS_USER_RESPONSE = 184 /* 0xb8 */, + HAL_TX_FES_STATUS_END = 185 /* 0xb9 */, + HAL_RX_TRIG_INFO = 186 /* 0xba */, + HAL_RXPCU_TX_SETUP_CLEAR = 187 /* 0xbb */, + HAL_RX_FRAME_BITMAP_REQ = 188 /* 0xbc */, + HAL_RX_FRAME_BITMAP_ACK = 189 /* 0xbd */, + HAL_COEX_RX_STATUS = 190 /* 0xbe */, + HAL_RX_START_PARAM = 191 /* 0xbf */, + HAL_RX_PPDU_START = 192 /* 0xc0 */, + HAL_RX_PPDU_END = 193 /* 0xc1 */, + HAL_RX_MPDU_START = 194 /* 0xc2 */, + HAL_RX_MPDU_END = 195 /* 0xc3 */, + HAL_RX_MSDU_START = 196 /* 0xc4 */, + HAL_RX_MSDU_END = 197 /* 0xc5 */, + HAL_RX_ATTENTION = 198 /* 0xc6 */, + HAL_RECEIVED_RESPONSE_INFO = 199 /* 0xc7 */, + HAL_RX_PHY_SLEEP = 200 /* 0xc8 */, + HAL_RX_HEADER = 201 /* 0xc9 */, + HAL_RX_PEER_ENTRY = 202 /* 0xca */, + HAL_RX_FLUSH = 203 /* 0xcb */, + HAL_RX_RESPONSE_REQUIRED_INFO = 204 /* 0xcc */, + HAL_RX_FRAMELESS_BAR_DETAILS = 205 /* 0xcd */, + HAL_TQM_GET_MPDU_QUEUE_STATS_STATUS = 206 /* 0xce */, + HAL_TQM_GET_MSDU_FLOW_STATS_STATUS = 207 /* 0xcf */, + HAL_TX_CBF_INFO = 208 /* 0xd0 */, + HAL_PCU_PPDU_SETUP_USER = 209 /* 0xd1 */, + HAL_RX_MPDU_PCU_START = 210 /* 0xd2 */, + HAL_RX_PM_INFO = 211 /* 0xd3 */, + HAL_RX_USER_PPDU_END = 212 /* 0xd4 */, + HAL_RX_PRE_PPDU_START = 213 /* 0xd5 */, + HAL_RX_PREAMBLE = 214 /* 0xd6 */, + HAL_TX_FES_SETUP_COMPLETE = 215 /* 0xd7 */, + HAL_TX_LAST_MPDU_FETCHED = 216 /* 0xd8 */, + HAL_TXDMA_STOP_REQUEST = 217 /* 0xd9 */, + HAL_RXPCU_SETUP = 218 /* 0xda */, + HAL_RXPCU_USER_SETUP = 219 /* 0xdb */, + HAL_TX_FES_STATUS_ACK_OR_BA = 220 /* 0xdc */, + HAL_TQM_ACKED_MPDU = 221 /* 0xdd */, + HAL_COEX_TX_RESP = 222 /* 0xde */, + HAL_COEX_TX_STATUS = 223 /* 0xdf */, + HAL_MACTX_COEX_PHY_CTRL = 224 /* 0xe0 */, + HAL_COEX_STATUS_BROADCAST = 225 /* 0xe1 */, + HAL_RESPONSE_START_STATUS = 226 /* 0xe2 */, + HAL_RESPONSEND_STATUS = 
227 /* 0xe3 */, + HAL_CRYPTO_STATUS = 228 /* 0xe4 */, + HAL_RECEIVED_TRIGGER_INFO = 229 /* 0xe5 */, + HAL_COEX_TX_STOP_CTRL = 230 /* 0xe6 */, + HAL_RX_PPDU_ACK_REPORT = 231 /* 0xe7 */, + HAL_RX_PPDU_NO_ACK_REPORT = 232 /* 0xe8 */, + HAL_SCH_COEX_STATUS = 233 /* 0xe9 */, + HAL_SCHEDULER_COMMAND_STATUS = 234 /* 0xea */, + HAL_SCHEDULER_RX_PPDU_NO_RESPONSE_STATUS = 235 /* 0xeb */, + HAL_TX_FES_STATUS_PROT = 236 /* 0xec */, + HAL_TX_FES_STATUS_START_PPDU = 237 /* 0xed */, + HAL_TX_FES_STATUS_START_PROT = 238 /* 0xee */, + HAL_TXPCU_PHYTX_DEBUG32 = 239 /* 0xef */, + HAL_TXPCU_PHYTX_OTHER_TRANSMIT_INFO32 = 240 /* 0xf0 */, + HAL_TX_MPDU_COUNT_TRANSFERND = 241 /* 0xf1 */, + HAL_WHO_ANCHOR_OFFSET = 242 /* 0xf2 */, + HAL_WHO_ANCHOR_VALUE = 243 /* 0xf3 */, + HAL_WHO_CCE_INFO = 244 /* 0xf4 */, + HAL_WHO_COMMIT = 245 /* 0xf5 */, + HAL_WHO_COMMIT_DONE = 246 /* 0xf6 */, + HAL_WHO_FLUSH = 247 /* 0xf7 */, + HAL_WHO_L2_LLC = 248 /* 0xf8 */, + HAL_WHO_L2_PAYLOAD = 249 /* 0xf9 */, + HAL_WHO_L3_CHECKSUM = 250 /* 0xfa */, + HAL_WHO_L3_INFO = 251 /* 0xfb */, + HAL_WHO_L4_CHECKSUM = 252 /* 0xfc */, + HAL_WHO_L4_INFO = 253 /* 0xfd */, + HAL_WHO_MSDU = 254 /* 0xfe */, + HAL_WHO_MSDU_MISC = 255 /* 0xff */, + HAL_WHO_PACKET_DATA = 256 /* 0x100 */, + HAL_WHO_PACKET_HDR = 257 /* 0x101 */, + HAL_WHO_PPDU_END = 258 /* 0x102 */, + HAL_WHO_PPDU_START = 259 /* 0x103 */, + HAL_WHO_TSO = 260 /* 0x104 */, + HAL_WHO_WMAC_HEADER_PV0 = 261 /* 0x105 */, + HAL_WHO_WMAC_HEADER_PV1 = 262 /* 0x106 */, + HAL_WHO_WMAC_IV = 263 /* 0x107 */, + HAL_MPDU_INFO_END = 264 /* 0x108 */, + HAL_MPDU_INFO_BITMAP = 265 /* 0x109 */, + HAL_TX_QUEUE_EXTENSION = 266 /* 0x10a */, + HAL_SCHEDULER_SELFGEN_RESPONSE_STATUS = 267 /* 0x10b */, + HAL_TQM_UPDATE_TX_MPDU_COUNT_STATUS = 268 /* 0x10c */, + HAL_TQM_ACKED_MPDU_STATUS = 269 /* 0x10d */, + HAL_TQM_ADD_MSDU_STATUS = 270 /* 0x10e */, + HAL_TQM_LIST_GEN_DONE = 271 /* 0x10f */, + HAL_WHO_TERMINATE = 272 /* 0x110 */, + HAL_TX_LAST_MPDU_END = 273 /* 0x111 */, + HAL_TX_CV_DATA = 274 /* 0x112 */, + HAL_PPDU_TX_END = 275 /* 0x113 */, + HAL_PROT_TX_END = 276 /* 0x114 */, + HAL_MPDU_INFO_GLOBAL_END = 277 /* 0x115 */, + HAL_TQM_SCH_INSTR_GLOBAL_END = 278 /* 0x116 */, + HAL_RX_PPDU_END_USER_STATS = 279 /* 0x117 */, + HAL_RX_PPDU_END_USER_STATS_EXT = 280 /* 0x118 */, + HAL_REO_GET_QUEUE_STATS = 281 /* 0x119 */, + HAL_REO_FLUSH_QUEUE = 282 /* 0x11a */, + HAL_REO_FLUSH_CACHE = 283 /* 0x11b */, + HAL_REO_UNBLOCK_CACHE = 284 /* 0x11c */, + HAL_REO_GET_QUEUE_STATS_STATUS = 285 /* 0x11d */, + HAL_REO_FLUSH_QUEUE_STATUS = 286 /* 0x11e */, + HAL_REO_FLUSH_CACHE_STATUS = 287 /* 0x11f */, + HAL_REO_UNBLOCK_CACHE_STATUS = 288 /* 0x120 */, + HAL_TQM_FLUSH_CACHE = 289 /* 0x121 */, + HAL_TQM_UNBLOCK_CACHE = 290 /* 0x122 */, + HAL_TQM_FLUSH_CACHE_STATUS = 291 /* 0x123 */, + HAL_TQM_UNBLOCK_CACHE_STATUS = 292 /* 0x124 */, + HAL_RX_PPDU_END_STATUS_DONE = 293 /* 0x125 */, + HAL_RX_STATUS_BUFFER_DONE = 294 /* 0x126 */, + HAL_TX_DATA_SYNC = 297 /* 0x129 */, + HAL_PHYRX_CBF_READ_REQUEST_ACK = 298 /* 0x12a */, + HAL_TQM_GET_MPDU_HEAD_INFO = 299 /* 0x12b */, + HAL_TQM_SYNC_CMD = 300 /* 0x12c */, + HAL_TQM_GET_MPDU_HEAD_INFO_STATUS = 301 /* 0x12d */, + HAL_TQM_SYNC_CMD_STATUS = 302 /* 0x12e */, + HAL_TQM_THRESHOLD_DROP_NOTIFICATION_STATUS = 303 /* 0x12f */, + HAL_TQM_DESCRIPTOR_THRESHOLD_REACHED_STATUS = 304 /* 0x130 */, + HAL_REO_FLUSH_TIMEOUT_LIST = 305 /* 0x131 */, + HAL_REO_FLUSH_TIMEOUT_LIST_STATUS = 306 /* 0x132 */, + HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS = 307 /* 0x133 */, + HAL_SCHEDULER_RX_SIFS_RESPONSE_TRIGGER_STATUS = 308 
/* 0x134 */, + HALXAMPLE_USER_TLV_32_NAME = 309 /* 0x135 */, + HAL_RX_PPDU_START_USER_INFO = 310 /* 0x136 */, + HAL_RX_RING_MASK = 311 /* 0x137 */, + HAL_COEX_MAC_NAP = 312 /* 0x138 */, + HAL_RXPCU_PPDU_END_INFO = 313 /* 0x139 */, + HAL_WHO_MESH_CONTROL = 314 /* 0x13a */, + HAL_PDG_SW_MODE_BW_START = 315 /* 0x13b */, + HAL_PDG_SW_MODE_BW_END = 316 /* 0x13c */, + HAL_PDG_WAIT_FOR_MAC_REQUEST = 317 /* 0x13d */, + HAL_PDG_WAIT_FOR_PHY_REQUEST = 318 /* 0x13e */, + HAL_SCHEDULER_END = 319 /* 0x13f */, + HAL_RX_PPDU_START_DROPPED = 320 /* 0x140 */, + HAL_RX_PPDU_END_DROPPED = 321 /* 0x141 */, + HAL_RX_PPDU_END_STATUS_DONE_DROPPED = 322 /* 0x142 */, + HAL_RX_MPDU_START_DROPPED = 323 /* 0x143 */, + HAL_RX_MSDU_START_DROPPED = 324 /* 0x144 */, + HAL_RX_MSDU_END_DROPPED = 325 /* 0x145 */, + HAL_RX_MPDU_END_DROPPED = 326 /* 0x146 */, + HAL_RX_ATTENTION_DROPPED = 327 /* 0x147 */, + HAL_TXPCU_USER_SETUP = 328 /* 0x148 */, + HAL_RXPCU_USER_SETUP_EXT = 329 /* 0x149 */, + HAL_CMD_PART_0_END = 330 /* 0x14a */, + HAL_MACTX_SYNTH_ON = 331 /* 0x14b */, + HAL_SCH_CRITICAL_TLV_REFERENCE = 332 /* 0x14c */, + HAL_TQM_MPDU_GLOBAL_START = 333 /* 0x14d */, + HALXAMPLE_TLV_32 = 334 /* 0x14e */, + HAL_TQM_UPDATE_TX_MSDU_FLOW = 335 /* 0x14f */, + HAL_TQM_UPDATE_TX_MPDU_QUEUE_HEAD = 336 /* 0x150 */, + HAL_TQM_UPDATE_TX_MSDU_FLOW_STATUS = 337 /* 0x151 */, + HAL_TQM_UPDATE_TX_MPDU_QUEUE_HEAD_STATUS = 338 /* 0x152 */, + HAL_REO_UPDATE_RX_REO_QUEUE = 339 /* 0x153 */, + HAL_TQM_MPDU_QUEUEMPTY_STATUS = 340 /* 0x154 */, + HAL_TQM_2_SCH_MPDU_AVAILABLE = 341 /* 0x155 */, + HAL_PDG_TRIG_RESPONSE = 342 /* 0x156 */, + HAL_TRIGGER_RESPONSE_TX_DONE = 343 /* 0x157 */, + HAL_ABORT_FROM_PHYRX_DETAILS = 344 /* 0x158 */, + HAL_SCH_TQM_CMD_WRAPPER = 345 /* 0x159 */, + HAL_MPDUS_AVAILABLE = 346 /* 0x15a */, + HAL_RECEIVED_RESPONSE_INFO_PART2 = 347 /* 0x15b */, + HAL_PHYRX_TX_START_TIMING = 348 /* 0x15c */, + HAL_TXPCU_PREAMBLE_DONE = 349 /* 0x15d */, + HAL_NDP_PREAMBLE_DONE = 350 /* 0x15e */, + HAL_SCH_TQM_CMD_WRAPPER_RBO_DROP = 351 /* 0x15f */, + HAL_SCH_TQM_CMD_WRAPPER_CONT_DROP = 352 /* 0x160 */, + HAL_MACTX_CLEAR_PREV_TX_INFO = 353 /* 0x161 */, + HAL_TX_PUNCTURE_SETUP = 354 /* 0x162 */, + HAL_R2R_STATUS_END = 355 /* 0x163 */, + HAL_MACTX_PREFETCH_CV_COMMON = 356 /* 0x164 */, + HAL_END_OF_FLUSH_MARKER = 357 /* 0x165 */, + HAL_MACTX_MU_UPLINK_COMMON_PUNC = 358 /* 0x166 */, + HAL_MACTX_MU_UPLINK_USER_SETUP_PUNC = 359 /* 0x167 */, + HAL_RECEIVED_RESPONSE_USER_7_0 = 360 /* 0x168 */, + HAL_RECEIVED_RESPONSE_USER_15_8 = 361 /* 0x169 */, + HAL_RECEIVED_RESPONSE_USER_23_16 = 362 /* 0x16a */, + HAL_RECEIVED_RESPONSE_USER_31_24 = 363 /* 0x16b */, + HAL_RECEIVED_RESPONSE_USER_36_32 = 364 /* 0x16c */, + HAL_TX_LOOPBACK_SETUP = 365 /* 0x16d */, + HAL_PHYRX_OTHER_RECEIVE_INFO_RU_DETAILS = 366 /* 0x16e */, + HAL_SCH_WAIT_INSTR_TX_PATH = 367 /* 0x16f */, + HAL_MACTX_OTHER_TRANSMIT_INFO_TX2TX = 368 /* 0x170 */, + HAL_MACTX_OTHER_TRANSMIT_INFOMUPHY_SETUP = 369 /* 0x171 */, + HAL_PHYRX_OTHER_RECEIVE_INFOVM_DETAILS = 370 /* 0x172 */, + HAL_TX_WUR_DATA = 371 /* 0x173 */, + HAL_RX_PPDU_END_START = 372 /* 0x174 */, + HAL_RX_PPDU_END_MIDDLE = 373 /* 0x175 */, + HAL_RX_PPDU_END_LAST = 374 /* 0x176 */, + HAL_MACTX_BACKOFF_BASED_TRANSMISSION = 375 /* 0x177 */, + HAL_MACTX_OTHER_TRANSMIT_INFO_DL_OFDMA_TX = 376 /* 0x178 */, + HAL_SRP_INFO = 377 /* 0x179 */, + HAL_OBSS_SR_INFO = 378 /* 0x17a */, + HAL_SCHEDULER_SW_MSG_STATUS = 379 /* 0x17b */, + HAL_HWSCH_RXPCU_MAC_INFO_ANNOUNCEMENT = 380 /* 0x17c */, + HAL_RXPCU_SETUP_COMPLETE = 381 /* 0x17d */, + 
HAL_SNOOP_PPDU_START = 382 /* 0x17e */, + HAL_SNOOP_MPDU_USR_DBG_INFO = 383 /* 0x17f */, + HAL_SNOOP_MSDU_USR_DBG_INFO = 384 /* 0x180 */, + HAL_SNOOP_MSDU_USR_DATA = 385 /* 0x181 */, + HAL_SNOOP_MPDU_USR_STAT_INFO = 386 /* 0x182 */, + HAL_SNOOP_PPDU_END = 387 /* 0x183 */, + HAL_SNOOP_SPARE = 388 /* 0x184 */, + HAL_PHYRX_OTHER_RECEIVE_INFO_MU_RSSI_COMMON = 390 /* 0x186 */, + HAL_PHYRX_OTHER_RECEIVE_INFO_MU_RSSI_USER = 391 /* 0x187 */, + HAL_MACTX_OTHER_TRANSMIT_INFO_SCH_DETAILS = 392 /* 0x188 */, + HAL_PHYRX_OTHER_RECEIVE_INFO_108PVM_DETAILS = 393 /* 0x189 */, + HAL_SCH_TLV_WRAPPER = 394 /* 0x18a */, + HAL_SCHEDULER_STATUS_WRAPPER = 395 /* 0x18b */, + HAL_MPDU_INFO_6X = 396 /* 0x18c */, + HAL_MACTX_11AZ_USER_DESC_PER_USER = 397 /* 0x18d */, + HAL_MACTX_U_SIGHT_SU_MU = 398 /* 0x18e */, + HAL_MACTX_U_SIGHT_TB = 399 /* 0x18f */, + HAL_PHYRX_U_SIGHT_SU_MU = 403 /* 0x193 */, + HAL_PHYRX_U_SIGHT_TB = 404 /* 0x194 */, + HAL_MACRX_LMR_READ_REQUEST = 408 /* 0x198 */, + HAL_MACRX_LMR_DATA_REQUEST = 409 /* 0x199 */, + HAL_PHYRX_LMR_TRANSFER_DONE = 410 /* 0x19a */, + HAL_PHYRX_LMR_TRANSFER_ABORT = 411 /* 0x19b */, + HAL_PHYRX_LMR_READ_REQUEST_ACK = 412 /* 0x19c */, + HAL_MACRX_SECURE_LTF_SEQ_PTR = 413 /* 0x19d */, + HAL_PHYRX_USER_INFO_MU_UL = 414 /* 0x19e */, + HAL_MPDU_QUEUE_OVERVIEW = 415 /* 0x19f */, + HAL_SCHEDULER_NAV_INFO = 416 /* 0x1a0 */, + HAL_LMR_PEER_ENTRY = 418 /* 0x1a2 */, + HAL_LMR_MPDU_START = 419 /* 0x1a3 */, + HAL_LMR_DATA = 420 /* 0x1a4 */, + HAL_LMR_MPDU_END = 421 /* 0x1a5 */, + HAL_REO_GET_QUEUE_1K_STATS_STATUS = 422 /* 0x1a6 */, + HAL_RX_FRAME_1K_BITMAP_ACK = 423 /* 0x1a7 */, + HAL_TX_FES_STATUS_1K_BA = 424 /* 0x1a8 */, + HAL_TQM_ACKED_1K_MPDU = 425 /* 0x1a9 */, + HAL_MACRX_INBSS_OBSS_IND = 426 /* 0x1aa */, + HAL_PHYRX_LOCATION = 427 /* 0x1ab */, + HAL_MLO_TX_NOTIFICATION_SU = 428 /* 0x1ac */, + HAL_MLO_TX_NOTIFICATION_MU = 429 /* 0x1ad */, + HAL_MLO_TX_REQ_SU = 430 /* 0x1ae */, + HAL_MLO_TX_REQ_MU = 431 /* 0x1af */, + HAL_MLO_TX_RESP = 432 /* 0x1b0 */, + HAL_MLO_RX_NOTIFICATION = 433 /* 0x1b1 */, + HAL_MLO_BKOFF_TRUNC_REQ = 434 /* 0x1b2 */, + HAL_MLO_TBTT_NOTIFICATION = 435 /* 0x1b3 */, + HAL_MLO_MESSAGE = 436 /* 0x1b4 */, + HAL_MLO_TS_SYNC_MSG = 437 /* 0x1b5 */, + HAL_MLO_FES_SETUP = 438 /* 0x1b6 */, + HAL_MLO_PDG_FES_SETUP_SU = 439 /* 0x1b7 */, + HAL_MLO_PDG_FES_SETUP_MU = 440 /* 0x1b8 */, + HAL_MPDU_INFO_1K_BITMAP = 441 /* 0x1b9 */, + HAL_MON_BUF_ADDR = 442 /* 0x1ba */, + HAL_TX_FRAG_STATE = 443 /* 0x1bb */, + HAL_MACTXHT_SIG_USR_OFDMA = 446 /* 0x1be */, + HAL_PHYRXHT_SIG_CMN_PUNC = 448 /* 0x1c0 */, + HAL_PHYRXHT_SIG_CMN_OFDMA = 450 /* 0x1c2 */, + HAL_PHYRXHT_SIG_USR_OFDMA = 454 /* 0x1c6 */, + HAL_PHYRX_PKT_END_PART1 = 456 /* 0x1c8 */, + HAL_MACTXXPECT_NDP_RECEPTION = 457 /* 0x1c9 */, + HAL_MACTX_SECURE_LTF_SEQ_PTR = 458 /* 0x1ca */, + HAL_MLO_PDG_BKOFF_TRUNC_NOTIFY = 460 /* 0x1cc */, + HAL_PHYRX_11AZ_INTEGRITY_DATA = 461 /* 0x1cd */, + HAL_PHYTX_LOCATION = 462 /* 0x1ce */, + HAL_PHYTX_11AZ_INTEGRITY_DATA = 463 /* 0x1cf */, + HAL_MACTXHT_SIG_USR_SU = 466 /* 0x1d2 */, + HAL_MACTXHT_SIG_USR_MU_MIMO = 467 /* 0x1d3 */, + HAL_PHYRXHT_SIG_USR_SU = 468 /* 0x1d4 */, + HAL_PHYRXHT_SIG_USR_MU_MIMO = 469 /* 0x1d5 */, + HAL_PHYRX_GENERIC_U_SIG = 470 /* 0x1d6 */, + HAL_PHYRX_GENERICHT_SIG = 471 /* 0x1d7 */, + HAL_OVERWRITE_RESP_START = 472 /* 0x1d8 */, + HAL_OVERWRITE_RESP_PREAMBLE_INFO = 473 /* 0x1d9 */, + HAL_OVERWRITE_RESP_FRAME_INFO = 474 /* 0x1da */, + HAL_OVERWRITE_RESP_END = 475 /* 0x1db */, + HAL_RXPCUARLY_RX_INDICATION = 476 /* 0x1dc */, + HAL_MON_DROP = 477 /* 0x1dd */, + 
HAL_MACRX_MU_UPLINK_COMMON_SNIFF = 478 /* 0x1de */, + HAL_MACRX_MU_UPLINK_USER_SETUP_SNIFF = 479 /* 0x1df */, + HAL_MACRX_MU_UPLINK_USER_SEL_SNIFF = 480 /* 0x1e0 */, + HAL_MACRX_MU_UPLINK_FCS_STATUS_SNIFF = 481 /* 0x1e1 */, + HAL_MACTX_PREFETCH_CV_DMA = 482 /* 0x1e2 */, + HAL_MACTX_PREFETCH_CV_PER_USER = 483 /* 0x1e3 */, + HAL_PHYRX_OTHER_RECEIVE_INFO_ALL_SIGB_DETAILS = 484 /* 0x1e4 */, + HAL_MACTX_BF_PARAMS_UPDATE_COMMON = 485 /* 0x1e5 */, + HAL_MACTX_BF_PARAMS_UPDATE_PER_USER = 486 /* 0x1e6 */, + HAL_RANGING_USER_DETAILS = 487 /* 0x1e7 */, + HAL_PHYTX_CV_CORR_STATUS = 488 /* 0x1e8 */, + HAL_PHYTX_CV_CORR_COMMON = 489 /* 0x1e9 */, + HAL_PHYTX_CV_CORR_USER = 490 /* 0x1ea */, + HAL_MACTX_CV_CORR_COMMON = 491 /* 0x1eb */, + HAL_MACTX_CV_CORR_MAC_INFO_GROUP = 492 /* 0x1ec */, + HAL_BW_PUNCTUREVAL_WRAPPER = 493 /* 0x1ed */, + HAL_MACTX_RX_NOTIFICATION_FOR_PHY = 494 /* 0x1ee */, + HAL_MACTX_TX_NOTIFICATION_FOR_PHY = 495 /* 0x1ef */, + HAL_MACTX_MU_UPLINK_COMMON_PER_BW = 496 /* 0x1f0 */, + HAL_MACTX_MU_UPLINK_USER_SETUP_PER_BW = 497 /* 0x1f1 */, + HAL_RX_PPDU_END_USER_STATS_EXT2 = 498 /* 0x1f2 */, + HAL_FW2SW_MON = 499 /* 0x1f3 */, + HAL_WSI_DIRECT_MESSAGE = 500 /* 0x1f4 */, + HAL_MACTXMLSR_PRE_SWITCH = 501 /* 0x1f5 */, + HAL_MACTXMLSR_SWITCH = 502 /* 0x1f6 */, + HAL_MACTXMLSR_SWITCH_BACK = 503 /* 0x1f7 */, + HAL_PHYTXMLSR_SWITCH_ACK = 504 /* 0x1f8 */, + HAL_PHYTXMLSR_SWITCH_BACK_ACK = 505 /* 0x1f9 */, + HAL_SPARE_REUSE_TAG_0 = 506 /* 0x1fa */, + HAL_SPARE_REUSE_TAG_1 = 507 /* 0x1fb */, + HAL_SPARE_REUSE_TAG_2 = 508 /* 0x1fc */, + HAL_SPARE_REUSE_TAG_3 = 509 /* 0x1fd */, + /* FIXME: Assign correct value for HAL_TCL_DATA_CMD */ + HAL_TCL_DATA_CMD = 510, + HAL_TLV_BASE = 511 /* 0x1ff */, +}; + +#define HAL_TLV_HDR_TAG GENMASK(9, 1) +#define HAL_TLV_HDR_LEN GENMASK(25, 10) +#define HAL_TLV_USR_ID GENMASK(31, 26) + +#define HAL_TLV_ALIGN 4 + +struct hal_tlv_hdr { + __le32 tl; + u8 value[]; +} __packed; + +#define HAL_TLV_64_HDR_TAG GENMASK(9, 1) +#define HAL_TLV_64_HDR_LEN GENMASK(21, 10) + +struct hal_tlv_64_hdr { + u64 tl; + u8 value[]; +} __packed; + +#define RX_MPDU_DESC_INFO0_MSDU_COUNT GENMASK(7, 0) +#define RX_MPDU_DESC_INFO0_FRAG_FLAG BIT(8) +#define RX_MPDU_DESC_INFO0_MPDU_RETRY BIT(9) +#define RX_MPDU_DESC_INFO0_AMPDU_FLAG BIT(10) +#define RX_MPDU_DESC_INFO0_BAR_FRAME BIT(11) +#define RX_MPDU_DESC_INFO0_VALID_PN BIT(12) +#define RX_MPDU_DESC_INFO0_RAW_MPDU BIT(13) +#define RX_MPDU_DESC_INFO0_MORE_FRAG_FLAG BIT(14) +#define RX_MPDU_DESC_INFO0_SRC_INFO GENMASK(26, 15) +#define RX_MPDU_DESC_INFO0_MPDU_QOS_CTRL_VALID BIT(27) +#define RX_MPDU_DESC_INFO0_TID GENMASK(31, 28) + +/* TODO revisit after meta data is concluded */ +#define RX_MPDU_DESC_META_DATA_PEER_ID GENMASK(15, 0) + +struct rx_mpdu_desc { + __le32 info0; /* %RX_MPDU_DESC_INFO */ + __le32 peer_meta_data; +} __packed; + +/* rx_mpdu_desc + * Producer: RXDMA + * Consumer: REO/SW/FW + * + * msdu_count + * The number of MSDUs within the MPDU + * + * fragment_flag + * When set, this MPDU is a fragment and REO should forward this + * fragment MPDU to the REO destination ring without any reorder + * checks, pn checks or bitmap update. This implies that REO is + * forwarding the pointer to the MSDU link descriptor. + * + * mpdu_retry_bit + * The retry bit setting from the MPDU header of the received frame + * + * ampdu_flag + * Indicates the MPDU was received as part of an A-MPDU. + * + * bar_frame + * Indicates the received frame is a BAR frame. After processing, + * this frame shall be pushed to SW or deleted. 
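[Editor's note — an illustrative sketch, not part of the patch. SW consuming one of these descriptors could unpack the fields described here with the <linux/bitfield.h> helpers; mpdu is a hypothetical struct rx_mpdu_desc pointer:

	u32 info = le32_to_cpu(mpdu->info0);
	u8 msdu_count = u32_get_bits(info, RX_MPDU_DESC_INFO0_MSDU_COUNT);
	bool frag = !!(info & RX_MPDU_DESC_INFO0_FRAG_FLAG);
	u8 tid = u32_get_bits(info, RX_MPDU_DESC_INFO0_TID);
	u16 peer_id = le32_get_bits(mpdu->peer_meta_data,
				    RX_MPDU_DESC_META_DATA_PEER_ID);

The plain mask test works for frag because RX_MPDU_DESC_INFO0_FRAG_FLAG is a single-bit mask.]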
+ *
+ * valid_pn
+ * When not set, REO will not perform a PN sequence number check.
+ *
+ * raw_mpdu
+ * Field only valid when first_msdu_in_mpdu_flag is set. Indicates
+ * the contents in the MSDU buffer contains a 'RAW' MPDU. This
+ * 'RAW' MPDU might be spread out over multiple MSDU buffers.
+ *
+ * more_fragment_flag
+ * The More Fragment bit setting from the MPDU header of the
+ * received frame
+ *
+ * src_info
+ * Source (Virtual) device/interface info associated with this peer.
+ * This field gets passed on by REO to PPE in the EDMA descriptor.
+ *
+ * mpdu_qos_control_valid
+ * When set, the MPDU has a QoS control field
+ *
+ * tid
+ * Field only valid when mpdu_qos_control_valid is set
+ */
+
+enum hal_rx_msdu_desc_reo_dest_ind {
+ HAL_RX_MSDU_DESC_REO_DEST_IND_TCL,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW1,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW2,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW3,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW4,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_RELEASE,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_FW,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW5,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW6,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW7,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW8,
+};
+
+#define RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU BIT(0)
+#define RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU BIT(1)
+#define RX_MSDU_DESC_INFO0_MSDU_CONTINUATION BIT(2)
+#define RX_MSDU_DESC_INFO0_MSDU_LENGTH GENMASK(16, 3)
+#define RX_MSDU_DESC_INFO0_MSDU_DROP BIT(17)
+#define RX_MSDU_DESC_INFO0_VALID_SA BIT(18)
+#define RX_MSDU_DESC_INFO0_VALID_DA BIT(19)
+#define RX_MSDU_DESC_INFO0_DA_MCBC BIT(20)
+#define RX_MSDU_DESC_INFO0_L3_HDR_PAD_MSB BIT(21)
+#define RX_MSDU_DESC_INFO0_TCP_UDP_CHKSUM_FAIL BIT(22)
+#define RX_MSDU_DESC_INFO0_IP_CHKSUM_FAIL BIT(23)
+#define RX_MSDU_DESC_INFO0_FROM_DS BIT(24)
+#define RX_MSDU_DESC_INFO0_TO_DS BIT(25)
+#define RX_MSDU_DESC_INFO0_INTRA_BSS BIT(26)
+#define RX_MSDU_DESC_INFO0_DST_CHIP_ID GENMASK(28, 27)
+#define RX_MSDU_DESC_INFO0_DECAP_FORMAT GENMASK(30, 29)
+
+#define HAL_RX_MSDU_PKT_LENGTH_GET(val) \
+ (u32_get_bits((val), RX_MSDU_DESC_INFO0_MSDU_LENGTH))
+
+struct rx_msdu_desc {
+ __le32 info0;
+} __packed;
+
+/* rx_msdu_desc
+ *
+ * first_msdu_in_mpdu
+ * Indicates first msdu in mpdu.
+ *
+ * last_msdu_in_mpdu
+ * Indicates last msdu in mpdu. This flag can be true only when
+ * 'Msdu_continuation' is set to 0. This implies that when an msdu
+ * is spread out over multiple buffers and thus msdu_continuation
+ * is set, only for the very last buffer of the msdu, can the
+ * 'last_msdu_in_mpdu' be set.
+ *
+ * When both first_msdu_in_mpdu and last_msdu_in_mpdu are set,
+ * the MPDU that this MSDU belongs to only contains a single MSDU.
+ *
+ * msdu_continuation
+ * When set, this MSDU buffer was not able to hold the entire MSDU.
+ * The next buffer will therefore contain additional information
+ * related to this MSDU.
+ *
+ * msdu_length
+ * Field is only valid in combination with the 'first_msdu_in_mpdu'
+ * being set. Full MSDU length in bytes after decapsulation. This
+ * field is still valid for MPDU frames without A-MSDU. It still
+ * represents MSDU length after decapsulation. Or, in case of RAW
+ * MPDUs, it indicates the length of the entire MPDU (without FCS
+ * field).
+ *
+ * msdu_drop
+ * Indicates that REO shall drop this MSDU and not forward it to
+ * any other ring.
+ *
+ * valid_sa
+ * Indicates OLE found a valid SA entry for this MSDU.
+ *
+ * valid_da
+ * When set, OLE found a valid DA entry for this MSDU.
+ *
+ * da_mcbc
+ * Field only valid if valid_da is set.
Indicates the DA address
+ * is a Multicast or Broadcast address for this MSDU.
+ *
+ * l3_header_padding_msb
+ * Passed on from 'RX_MSDU_END' TLV (only the MSB is reported as
+ * the LSB is always zero). Number of bytes padded to make sure
+ * that the L3 header will always start on a Dword boundary
+ *
+ * tcp_udp_checksum_fail
+ * Passed on from 'RX_ATTENTION' TLV
+ * Indicates that the computed checksum did not match the checksum
+ * in the TCP/UDP header.
+ *
+ * ip_checksum_fail
+ * Passed on from 'RX_ATTENTION' TLV
+ * Indicates that the computed checksum did not match the checksum
+ * in the IP header.
+ *
+ * from_DS
+ * Set if the 'from DS' bit is set in the frame control.
+ *
+ * to_DS
+ * Set if the 'to DS' bit is set in the frame control.
+ *
+ * intra_bss
+ * This packet needs intra-BSS routing by SW as the 'vdev_id'
+ * for the destination is the same as the 'vdev_id' that this
+ * MSDU was received on.
+ *
+ * dest_chip_id
+ * If intra_bss is set, copied by RXOLE/RXDMA from 'ADDR_SEARCH_ENTRY'
+ * to support intra-BSS routing with multi-chip multi-link operation.
+ * This indicates into which chip's TCL the packet should be queued.
+ *
+ * decap_format
+ * Indicates the format after decapsulation:
+ */
+
+#define RX_MSDU_EXT_DESC_INFO0_REO_DEST_IND GENMASK(4, 0)
+#define RX_MSDU_EXT_DESC_INFO0_SERVICE_CODE GENMASK(13, 5)
+#define RX_MSDU_EXT_DESC_INFO0_PRIORITY_VALID BIT(14)
+#define RX_MSDU_EXT_DESC_INFO0_DATA_OFFSET GENMASK(26, 15)
+#define RX_MSDU_EXT_DESC_INFO0_SRC_LINK_ID GENMASK(29, 27)
+
+struct rx_msdu_ext_desc {
+ __le32 info0;
+} __packed;
+
+/* rx_msdu_ext_desc
+ *
+ * reo_destination_indication
+ * The ID of the REO exit ring where the MSDU frame shall be pushed
+ * after (MPDU level) reordering has finished.
+ *
+ * service_code
+ * Opaque service code between PPE and Wi-Fi
+ *
+ * priority_valid
+ *
+ * data_offset
+ * The offset to Rx packet data within the buffer (including
+ * Rx DMA offset programming and L3 header padding inserted
+ * by Rx OLE).
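[Editor's note — an illustrative sketch, not part of the patch. Decoding the rx_msdu_ext_desc fields described above; ext_desc is a hypothetical struct rx_msdu_ext_desc pointer:

	u32 info = le32_to_cpu(ext_desc->info0);
	u8 reo_dest = u32_get_bits(info, RX_MSDU_EXT_DESC_INFO0_REO_DEST_IND);
	u16 data_offset = u32_get_bits(info, RX_MSDU_EXT_DESC_INFO0_DATA_OFFSET);

Here reo_dest maps onto enum hal_rx_msdu_desc_reo_dest_ind, and data_offset gives the start of packet data within the buffer.]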
+ *
+ * src_link_id
+ * Set to the link ID of the PMAC that received the frame
+ */
+
+enum hal_reo_dest_ring_buffer_type {
+ HAL_REO_DEST_RING_BUFFER_TYPE_MSDU,
+ HAL_REO_DEST_RING_BUFFER_TYPE_LINK_DESC,
+};
+
+enum hal_reo_dest_ring_push_reason {
+ HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED,
+ HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION,
+};
+
+enum hal_reo_dest_ring_error_code {
+ HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO,
+ HAL_REO_DEST_RING_ERROR_CODE_DESC_INVALID,
+ HAL_REO_DEST_RING_ERROR_CODE_AMPDU_IN_NON_BA,
+ HAL_REO_DEST_RING_ERROR_CODE_NON_BA_DUPLICATE,
+ HAL_REO_DEST_RING_ERROR_CODE_BA_DUPLICATE,
+ HAL_REO_DEST_RING_ERROR_CODE_FRAME_2K_JUMP,
+ HAL_REO_DEST_RING_ERROR_CODE_BAR_2K_JUMP,
+ HAL_REO_DEST_RING_ERROR_CODE_FRAME_OOR,
+ HAL_REO_DEST_RING_ERROR_CODE_BAR_OOR,
+ HAL_REO_DEST_RING_ERROR_CODE_NO_BA_SESSION,
+ HAL_REO_DEST_RING_ERROR_CODE_FRAME_SN_EQUALS_SSN,
+ HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED,
+ HAL_REO_DEST_RING_ERROR_CODE_2K_ERR_FLAG_SET,
+ HAL_REO_DEST_RING_ERROR_CODE_PN_ERR_FLAG_SET,
+ HAL_REO_DEST_RING_ERROR_CODE_DESC_BLOCKED,
+ HAL_REO_DEST_RING_ERROR_CODE_MAX,
+};
+
+#define HAL_REO_DEST_RING_INFO0_BUFFER_TYPE BIT(0)
+#define HAL_REO_DEST_RING_INFO0_PUSH_REASON GENMASK(2, 1)
+#define HAL_REO_DEST_RING_INFO0_ERROR_CODE GENMASK(7, 3)
+#define HAL_REO_DEST_RING_INFO0_MSDU_DATA_SIZE GENMASK(11, 8)
+#define HAL_REO_DEST_RING_INFO0_SW_EXCEPTION BIT(12)
+#define HAL_REO_DEST_RING_INFO0_SRC_LINK_ID GENMASK(15, 13)
+#define HAL_REO_DEST_RING_INFO0_SIGNATURE GENMASK(19, 16)
+#define HAL_REO_DEST_RING_INFO0_RING_ID GENMASK(27, 20)
+#define HAL_REO_DEST_RING_INFO0_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_reo_dest_ring {
+ struct ath12k_buffer_addr buf_addr_info;
+ struct rx_mpdu_desc rx_mpdu_info;
+ struct rx_msdu_desc rx_msdu_info;
+ __le32 buf_va_lo;
+ __le32 buf_va_hi;
+ __le32 info0; /* %HAL_REO_DEST_RING_INFO0_ */
+} __packed;
+
+/* hal_reo_dest_ring
+ *
+ * Producer: RXDMA
+ * Consumer: REO/SW/FW
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ *
+ * rx_mpdu_info
+ * General information related to the MPDU that is passed
+ * on from REO entrance ring to the REO destination ring.
+ *
+ * rx_msdu_info
+ * General information related to the MSDU that is passed
+ * on from RXDMA all the way to the REO destination ring.
+ *
+ * buf_va_lo
+ * Field only valid if Reo_dest_buffer_type is set to MSDU_buf_address
+ * Lower 32 bits of the 64-bit virtual address corresponding
+ * to Buf_or_link_desc_addr_info
+ *
+ * buf_va_hi
+ * Upper 32 bits of the 64-bit virtual address corresponding
+ * to Buf_or_link_desc_addr_info
+ *
+ * buffer_type
+ * Indicates the type of address provided in the buf_addr_info.
+ * Values are defined in enum %HAL_REO_DEST_RING_BUFFER_TYPE_.
+ *
+ * push_reason
+ * Reason for pushing this frame to this exit ring. Values are
+ * defined in enum %HAL_REO_DEST_RING_PUSH_REASON_.
+ *
+ * error_code
+ * Valid only when 'push_reason' is set. All error codes are
+ * defined in enum %HAL_REO_DEST_RING_ERROR_CODE_.
+ *
+ * captured_msdu_data_size
+ * The number of following REO_DESTINATION STRUCTs that have
+ * been replaced with msdu_data extracted from the msdu_buffer
+ * and copied into the ring for easy FW/SW access.
+ *
+ * sw_exception
+ * This field has the same setting as the SW_exception field
+ * in the corresponding REO_entrance_ring descriptor.
+ * When set, the REO entrance descriptor is generated by FW,
+ * and the MPDU was processed in the following way:
+ * - NO re-order function is needed.
+ * - MPDU delinking is determined by the setting of Entrance
+ * ring field: SW_exception_mpdu_delink
+ * - Destination ring selection is based on the setting of
+ * the Entrance ring field SW_exception_destination_ring_valid
+ *
+ * src_link_id
+ * Set to the link ID of the PMAC that received the frame
+ *
+ * signature
+ * Set to value 0x8 when msdu capture mode is enabled for this ring
+ *
+ * ring_id
+ * The buffer pointer ring id.
+ * 0 - Idle ring
+ * 1 - N refers to other rings.
+ *
+ * looping_count
+ * Indicates the number of times the producer of entries into
+ * this ring has looped around the ring.
+ */
+
+#define HAL_REO_TO_PPE_RING_INFO0_DATA_LENGTH GENMASK(15, 0)
+#define HAL_REO_TO_PPE_RING_INFO0_DATA_OFFSET GENMASK(23, 16)
+#define HAL_REO_TO_PPE_RING_INFO0_POOL_ID GENMASK(28, 24)
+#define HAL_REO_TO_PPE_RING_INFO0_PREHEADER BIT(29)
+#define HAL_REO_TO_PPE_RING_INFO0_TSO_EN BIT(30)
+#define HAL_REO_TO_PPE_RING_INFO0_MORE BIT(31)
+
+struct hal_reo_to_ppe_ring {
+ __le32 buffer_addr;
+ __le32 info0; /* %HAL_REO_TO_PPE_RING_INFO0_ */
+} __packed;
+
+/* hal_reo_to_ppe_ring
+ *
+ * Producer: REO
+ * Consumer: PPE
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ *
+ * data_length
+ * Length of valid data in bytes
+ *
+ * data_offset
+ * Offset to the data from buffer pointer. Can be used to
+ * strip header in the data for tunnel termination etc.
+ *
+ * pool_id
+ * REO has a global configuration register for this field.
+ * It may have several free buffer pools; each
+ * RX-Descriptor ring can fetch free buffers from a specific
+ * buffer pool. The pool id indicates which pool the buffer
+ * will be released to; POOL_ID zero means the buffer is
+ * returned to SW.
+ *
+ * preheader
+ * Disabled: 0 (Default)
+ * Enabled: 1
+ *
+ * tso_en
+ * Disabled: 0 (Default)
+ * Enabled: 1
+ *
+ * more
+ * More segments follow
+ */
+
+enum hal_reo_entr_rxdma_push_reason {
+ HAL_REO_ENTR_RING_RXDMA_PUSH_REASON_ERR_DETECTED,
+ HAL_REO_ENTR_RING_RXDMA_PUSH_REASON_ROUTING_INSTRUCTION,
+ HAL_REO_ENTR_RING_RXDMA_PUSH_REASON_RX_FLUSH,
+};
+
+enum hal_reo_entr_rxdma_ecode {
+ HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_FCS_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_UNECRYPTED_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LEN_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LIMIT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_WIFI_PARSE_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_PARSE_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_SA_TIMEOUT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_DA_TIMEOUT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_FLOW_TIMEOUT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_FRAG_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MAX,
+};
+
+enum hal_rx_reo_dest_ring {
+ HAL_RX_REO_DEST_RING_TCL,
+ HAL_RX_REO_DEST_RING_SW1,
+ HAL_RX_REO_DEST_RING_SW2,
+ HAL_RX_REO_DEST_RING_SW3,
+ HAL_RX_REO_DEST_RING_SW4,
+ HAL_RX_REO_DEST_RING_RELEASE,
+ HAL_RX_REO_DEST_RING_FW,
+ HAL_RX_REO_DEST_RING_SW5,
+ HAL_RX_REO_DEST_RING_SW6,
+ HAL_RX_REO_DEST_RING_SW7,
+ HAL_RX_REO_DEST_RING_SW8,
+};
+
+#define HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_ENTR_RING_INFO0_MPDU_BYTE_COUNT GENMASK(21, 8)
+#define HAL_REO_ENTR_RING_INFO0_DEST_IND
GENMASK(26, 22)
+#define HAL_REO_ENTR_RING_INFO0_FRAMELESS_BAR BIT(27)
+
+#define HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON GENMASK(1, 0)
+#define HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE GENMASK(6, 2)
+#define HAL_REO_ENTR_RING_INFO1_MPDU_FRAG_NUM GENMASK(10, 7)
+#define HAL_REO_ENTR_RING_INFO1_SW_EXCEPTION BIT(11)
+#define HAL_REO_ENTR_RING_INFO1_SW_EXCEPT_MPDU_DELINK BIT(12)
+#define HAL_REO_ENTR_RING_INFO1_SW_EXCEPTION_RING_VLD BIT(13)
+#define HAL_REO_ENTR_RING_INFO1_SW_EXCEPTION_RING GENMASK(18, 14)
+#define HAL_REO_ENTR_RING_INFO1_MPDU_SEQ_NUM GENMASK(30, 19)
+
+#define HAL_REO_ENTR_RING_INFO2_PHY_PPDU_ID GENMASK(15, 0)
+#define HAL_REO_ENTR_RING_INFO2_SRC_LINK_ID GENMASK(18, 16)
+#define HAL_REO_ENTR_RING_INFO2_RING_ID GENMASK(27, 20)
+#define HAL_REO_ENTR_RING_INFO2_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_reo_entrance_ring {
+ struct ath12k_buffer_addr buf_addr_info;
+ struct rx_mpdu_desc rx_mpdu_info;
+ __le32 queue_addr_lo;
+ __le32 info0; /* %HAL_REO_ENTR_RING_INFO0_ */
+ __le32 info1; /* %HAL_REO_ENTR_RING_INFO1_ */
+ __le32 info2; /* %HAL_REO_ENTR_RING_INFO2_ */
+
+} __packed;
+
+/* hal_reo_entrance_ring
+ *
+ * Producer: RXDMA
+ * Consumer: REO
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ *
+ * rx_mpdu_info
+ * General information related to the MPDU that is passed
+ * on from REO entrance ring to the REO destination ring.
+ *
+ * queue_addr_lo
+ * Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * queue_addr_hi
+ * Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * mpdu_byte_count
+ * An approximation of the number of bytes received in this MPDU.
+ * Used to keep stats on the amount of data flowing
+ * through a queue.
+ *
+ * reo_destination_indication
+ * The id of the reo exit ring where the msdu frame shall be pushed
+ * after (MPDU level) reordering has finished. Values are defined
+ * in enum %HAL_RX_MSDU_DESC_REO_DEST_IND_.
+ *
+ * frameless_bar
+ * Indicates that this REO entrance ring struct contains BAR info
+ * from a multi TID BAR frame. The original multi TID BAR frame
+ * itself contained all the REO info for the first TID, but all
+ * the subsequent TID info and their linkage to the REO descriptors
+ * is passed down as 'frameless' BAR info.
+ *
+ * The only fields valid in this descriptor when this bit is set
+ * are queue_addr_lo, queue_addr_hi, mpdu_sequence_number,
+ * bar_frame and peer_meta_data.
+ *
+ * rxdma_push_reason
+ * Reason for pushing this frame to this exit ring. Values are
+ * defined in enum %HAL_REO_ENTR_RING_RXDMA_PUSH_REASON_.
+ *
+ * rxdma_error_code
+ * Valid only when 'push_reason' is set. All error codes are
+ * defined in enum %HAL_REO_ENTR_RING_RXDMA_ECODE_.
+ *
+ * mpdu_fragment_number
+ * Field only valid when
+ * Reo_level_mpdu_frame_info.Rx_mpdu_desc_info_details.Fragment_flag
+ * is set.
+ *
+ * sw_exception
+ * When not set, REO is performing all its default MPDU processing
+ * operations.
+ * When set, this REO entrance descriptor is generated by FW, and
+ * should be processed as an exception. This implies:
+ * NO re-order function is needed.
+ * MPDU delinking is determined by the setting of field
+ * SW_exception_mpdu_delink
+ *
+ * sw_exception_mpdu_delink
+ * Field only valid when SW_exception is set.
+ * 1'b0: REO should NOT delink the MPDU, and thus pass this
+ * MPDU on to the destination ring as is.
This implies that
+ * in the REO_DESTINATION_RING struct field
+ * Buf_or_link_desc_addr_info should point to an MSDU link
+ * descriptor
+ * 1'b1: REO should perform the normal MPDU delink into MSDU operations.
+ *
+ * sw_exception_dest_ring
+ * Field only valid when fields SW_exception and
+ * SW_exception_destination_ring_valid are set. Values are defined
+ * in %HAL_RX_REO_DEST_RING_.
+ *
+ * mpdu_seq_number
+ * The field can have two different meanings based on the setting
+ * of sub-field
+ * Reo_level_mpdu_frame_info.Rx_mpdu_desc_info_details.BAR_frame:
+ * 'BAR_frame' is NOT set:
+ * The MPDU sequence number of the received frame.
+ * 'BAR_frame' is set:
+ * The MPDU Start sequence number from the BAR frame
+ *
+ * phy_ppdu_id
+ * A PPDU counter value that PHY increments for every PPDU received
+ *
+ * src_link_id
+ * Set to the link ID of the PMAC that received the frame
+ *
+ * ring_id
+ * The buffer pointer ring id.
+ * 0 - Idle ring
+ * 1 - N refers to other rings.
+ *
+ * looping_count
+ * Indicates the number of times the producer of entries into
+ * this ring has looped around the ring.
+ */
+
+#define HAL_REO_CMD_HDR_INFO0_CMD_NUMBER GENMASK(15, 0)
+#define HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED BIT(16)
+
+struct hal_reo_cmd_hdr {
+ __le32 info0;
+} __packed;
+
+#define HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS BIT(8)
+
+struct hal_reo_get_queue_stats {
+ struct hal_reo_cmd_hdr cmd;
+ __le32 queue_addr_lo;
+ __le32 info0;
+ __le32 rsvd0[6];
+ __le32 tlv64_pad;
+} __packed;
+
+/* hal_reo_get_queue_stats
+ * Producer: SW
+ * Consumer: REO
+ *
+ * cmd
+ * Details for command execution tracking purposes.
+ *
+ * queue_addr_lo
+ * Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * queue_addr_hi
+ * Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * clear_stats
+ * Clear stats settings. When set, clear the stats after
+ * generating the status.
+ *
+ * Following stats will be cleared.
+ * Timeout_count + * Forward_due_to_bar_count + * Duplicate_count + * Frames_in_order_count + * BAR_received_count + * MPDU_Frames_processed_count + * MSDU_Frames_processed_count + * Total_processed_byte_count + * Late_receive_MPDU_count + * window_jump_2k + * Hole_count + */ + +#define HAL_REO_FLUSH_QUEUE_INFO0_DESC_ADDR_HI GENMASK(7, 0) +#define HAL_REO_FLUSH_QUEUE_INFO0_BLOCK_DESC_ADDR BIT(8) +#define HAL_REO_FLUSH_QUEUE_INFO0_BLOCK_RESRC_IDX GENMASK(10, 9) + +struct hal_reo_flush_queue { + struct hal_reo_cmd_hdr cmd; + __le32 desc_addr_lo; + __le32 info0; + __le32 rsvd0[6]; +} __packed; + +#define HAL_REO_FLUSH_CACHE_INFO0_CACHE_ADDR_HI GENMASK(7, 0) +#define HAL_REO_FLUSH_CACHE_INFO0_FWD_ALL_MPDUS BIT(8) +#define HAL_REO_FLUSH_CACHE_INFO0_RELEASE_BLOCK_IDX BIT(9) +#define HAL_REO_FLUSH_CACHE_INFO0_BLOCK_RESRC_IDX GENMASK(11, 10) +#define HAL_REO_FLUSH_CACHE_INFO0_FLUSH_WO_INVALIDATE BIT(12) +#define HAL_REO_FLUSH_CACHE_INFO0_BLOCK_CACHE_USAGE BIT(13) +#define HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL BIT(14) + +struct hal_reo_flush_cache { + struct hal_reo_cmd_hdr cmd; + __le32 cache_addr_lo; + __le32 info0; + __le32 rsvd0[6]; +} __packed; + +#define HAL_TCL_DATA_CMD_INFO0_CMD_TYPE BIT(0) +#define HAL_TCL_DATA_CMD_INFO0_DESC_TYPE BIT(1) +#define HAL_TCL_DATA_CMD_INFO0_BANK_ID GENMASK(7, 2) +#define HAL_TCL_DATA_CMD_INFO0_TX_NOTIFY_FRAME GENMASK(10, 8) +#define HAL_TCL_DATA_CMD_INFO0_HDR_LEN_READ_SEL BIT(11) +#define HAL_TCL_DATA_CMD_INFO0_BUF_TIMESTAMP GENMASK(30, 12) +#define HAL_TCL_DATA_CMD_INFO0_BUF_TIMESTAMP_VLD BIT(31) + +#define HAL_TCL_DATA_CMD_INFO1_CMD_NUM GENMASK(31, 16) + +#define HAL_TCL_DATA_CMD_INFO2_DATA_LEN GENMASK(15, 0) +#define HAL_TCL_DATA_CMD_INFO2_IP4_CKSUM_EN BIT(16) +#define HAL_TCL_DATA_CMD_INFO2_UDP4_CKSUM_EN BIT(17) +#define HAL_TCL_DATA_CMD_INFO2_UDP6_CKSUM_EN BIT(18) +#define HAL_TCL_DATA_CMD_INFO2_TCP4_CKSUM_EN BIT(19) +#define HAL_TCL_DATA_CMD_INFO2_TCP6_CKSUM_EN BIT(20) +#define HAL_TCL_DATA_CMD_INFO2_TO_FW BIT(21) +#define HAL_TCL_DATA_CMD_INFO2_PKT_OFFSET GENMASK(31, 23) + +#define HAL_TCL_DATA_CMD_INFO3_TID_OVERWRITE BIT(0) +#define HAL_TCL_DATA_CMD_INFO3_FLOW_OVERRIDE_EN BIT(1) +#define HAL_TCL_DATA_CMD_INFO3_CLASSIFY_INFO_SEL GENMASK(3, 2) +#define HAL_TCL_DATA_CMD_INFO3_TID GENMASK(7, 4) +#define HAL_TCL_DATA_CMD_INFO3_FLOW_OVERRIDE BIT(8) +#define HAL_TCL_DATA_CMD_INFO3_PMAC_ID GENMASK(10, 9) +#define HAL_TCL_DATA_CMD_INFO3_MSDU_COLOR GENMASK(12, 11) +#define HAL_TCL_DATA_CMD_INFO3_VDEV_ID GENMASK(31, 24) + +#define HAL_TCL_DATA_CMD_INFO4_SEARCH_INDEX GENMASK(19, 0) +#define HAL_TCL_DATA_CMD_INFO4_CACHE_SET_NUM GENMASK(23, 20) +#define HAL_TCL_DATA_CMD_INFO4_IDX_LOOKUP_OVERRIDE BIT(24) + +#define HAL_TCL_DATA_CMD_INFO5_RING_ID GENMASK(27, 20) +#define HAL_TCL_DATA_CMD_INFO5_LOOPING_COUNT GENMASK(31, 28) + +enum hal_encrypt_type { + HAL_ENCRYPT_TYPE_WEP_40, + HAL_ENCRYPT_TYPE_WEP_104, + HAL_ENCRYPT_TYPE_TKIP_NO_MIC, + HAL_ENCRYPT_TYPE_WEP_128, + HAL_ENCRYPT_TYPE_TKIP_MIC, + HAL_ENCRYPT_TYPE_WAPI, + HAL_ENCRYPT_TYPE_CCMP_128, + HAL_ENCRYPT_TYPE_OPEN, + HAL_ENCRYPT_TYPE_CCMP_256, + HAL_ENCRYPT_TYPE_GCMP_128, + HAL_ENCRYPT_TYPE_AES_GCMP_256, + HAL_ENCRYPT_TYPE_WAPI_GCM_SM4, +}; + +enum hal_tcl_encap_type { + HAL_TCL_ENCAP_TYPE_RAW, + HAL_TCL_ENCAP_TYPE_NATIVE_WIFI, + HAL_TCL_ENCAP_TYPE_ETHERNET, + HAL_TCL_ENCAP_TYPE_802_3 = 3, +}; + +enum hal_tcl_desc_type { + HAL_TCL_DESC_TYPE_BUFFER, + HAL_TCL_DESC_TYPE_EXT_DESC, +}; + +enum hal_wbm_htt_tx_comp_status { + HAL_WBM_REL_HTT_TX_COMP_STATUS_OK, + HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP, + 
HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_MAX,
+};
+
+struct hal_tcl_data_cmd {
+ struct ath12k_buffer_addr buf_addr_info;
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 info3;
+ __le32 info4;
+ __le32 info5;
+} __packed;
+
+/* hal_tcl_data_cmd
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ *
+ * tcl_cmd_type
+ * used to select the type of TCL Command descriptor
+ *
+ * desc_type
+ * Indicates the type of address provided in the buf_addr_info.
+ * Values are defined in enum %HAL_REO_DEST_RING_BUFFER_TYPE_.
+ *
+ * bank_id
+ * used to select one of the TCL register banks for fields removed
+ * from 'TCL_DATA_CMD' that do not change often within one virtual
+ * device or a set of virtual devices:
+ *
+ * tx_notify_frame
+ * TCL copies this value to 'TQM_ENTRANCE_RING' field FW_tx_notify_frame.
+ *
+ * hdr_length_read_sel
+ * used to select the per 'encap_type' register set for MSDU header
+ * read length
+ *
+ * buffer_timestamp
+ * buffer_timestamp_valid
+ * Frame system entrance timestamp. It shall be filled in by the
+ * first module (SW, TCL or TQM) that sees the frame.
+ *
+ * cmd_num
+ * This number can be used to match against status.
+ *
+ * data_length
+ * MSDU length in case of direct descriptor. Length of link
+ * extension descriptor in case of Link extension descriptor.
+ *
+ * *_checksum_en
+ * Enable checksum replacement for ipv4, udp_over_ipv4, ipv6,
+ * udp_over_ipv6, tcp_over_ipv4 and tcp_over_ipv6.
+ *
+ * to_fw
+ * Forward packet to FW along with classification result. The
+ * packet will not be forwarded to TQM when this bit is set.
+ * 1'b0: Use classification result to forward the packet.
+ * 1'b1: Override classification result & forward packet only to fw
+ *
+ * packet_offset
+ * Packet offset from Metadata in case of direct buffer descriptor.
+ *
+ * hlos_tid_overwrite
+ *
+ * When set, TCL shall ignore the IP DSCP and VLAN PCP
+ * fields and use HLOS_TID as the final TID. Otherwise TCL
+ * shall consider the DSCP and PCP fields as well as HLOS_TID
+ * and choose a final TID based on the configured priority
+ *
+ * flow_override_enable
+ * TCL uses this to select the flow pointer from the peer table,
+ * which can be overridden by SW for pre-encrypted raw WiFi packets
+ * that cannot be parsed for UDP or for other MLO
+ * 0 - FP_PARSE_IP: Use the flow-pointer based on parsing the IPv4
+ * or IPv6 header.
+ * 1 - FP_USE_OVERRIDE: Use the who_classify_info_sel and
+ * flow_override fields to select the flow-pointer
+ *
+ * who_classify_info_sel
+ * Field only valid when flow_override_enable is set to FP_USE_OVERRIDE.
+ * This field is used to select one of the 'WHO_CLASSIFY_INFO's in the
+ * peer table in case more than 2 flows are mapped to a single TID.
+ * 0: To choose Flow 0 and 1 of any TID use this value.
+ * 1: To choose Flow 2 and 3 of any TID use this value.
+ * 2: To choose Flow 4 and 5 of any TID use this value.
+ * 3: To choose Flow 6 and 7 of any TID use this value.
+ *
+ * If who_classify_info_sel is not in sync with the num_tx_classify_info
+ * field from address search, then TCL will set 'who_classify_info_sel'
+ * to 0 and use flows 0 and 1.
+ *
+ * hlos_tid
+ * HLOS MSDU priority
+ * Field is used when HLOS_TID_overwrite is set.
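[Editor's note — an illustrative sketch, not part of the patch. The TID override described above amounts to setting two fields in info3 with the <linux/bitfield.h> helpers; tid and vdev_id are hypothetical locals and cmd a struct hal_tcl_data_cmd pointer:

	cmd->info3 = le32_encode_bits(1, HAL_TCL_DATA_CMD_INFO3_TID_OVERWRITE) |
		     le32_encode_bits(tid, HAL_TCL_DATA_CMD_INFO3_TID) |
		     le32_encode_bits(vdev_id, HAL_TCL_DATA_CMD_INFO3_VDEV_ID);

With TID_OVERWRITE set, TCL uses the 4-bit TID field as the final TID instead of deriving one from the DSCP/PCP fields.]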
+ *
+ * flow_override
+ *	Field only valid when flow_override_enable is set to FP_USE_OVERRIDE
+ *	TCL uses this to select the flow pointer from the peer table,
+ *	which can be overridden by SW for pre-encrypted raw WiFi packets
+ *	that cannot be parsed for UDP or for other MLO
+ *	0 - FP_USE_NON_UDP: Use the non-UDP flow pointer (flow 0)
+ *	1 - FP_USE_UDP: Use the UDP flow pointer (flow 1)
+ *
+ * pmac_id
+ *	TCL uses this PMAC_ID in address search, i.e., while
+ *	finding matching entry for the packet in AST corresponding
+ *	to given PMAC_ID
+ *
+ *	If PMAC ID is all 1s (=> value 3), it indicates wildcard
+ *	match for any PMAC
+ *
+ * vdev_id
+ *	Virtual device ID to check against the address search entry to
+ *	avoid security issues from transmitting packets from an incorrect
+ *	virtual device
+ *
+ * search_index
+ *	The index that will be used for index based address or
+ *	flow search. The field is valid when 'search_type' is 1 or 2.
+ *
+ * cache_set_num
+ *
+ *	Cache set number that should be used to cache the index
+ *	based search results, for address and flow search. This
+ *	value should be equal to LSB four bits of the hash value of
+ *	match data, in case of search index points to an entry which
+ *	may be used in content based search also. The value can be
+ *	anything when the entry pointed by search index will not be
+ *	used for content based search.
+ *
+ * index_loop_override
+ *	When set, address search and packet routing is forced to use
+ *	'search_index' instead of following the register configuration
+ *	selected by Bank_id.
+ *
+ * ring_id
+ *	The buffer pointer ring ID.
+ *	0 refers to the IDLE ring
+ *	1 - N refers to other rings
+ *
+ * looping_count
+ *
+ *	A count value that indicates the number of times the
+ *	producer of entries into the Ring has looped around the
+ *	ring.
+ *
+ *	At initialization time, this value is set to 0. On the
+ *	first loop, this value is set to 1. After the max value
+ *	allowed by the number of bits for this field is reached,
+ *	the count value continues with 0 again.
+ *
+ *	In case SW is the consumer of the ring entries, it can
+ *	use this field to figure out up to where the producer of
+ *	entries has created new entries. This eliminates the need to
+ *	check where the 'head pointer' of the ring is located once
+ *	the SW starts processing an interrupt indicating that new
+ *	entries have been put into this ring.
+ *
+ *	Also note that SW, if it wants, only needs to look at the
+ *	LSB bit of this count value.
+ */
+
+#define HAL_TCL_DESC_LEN sizeof(struct hal_tcl_data_cmd)
+
+#define HAL_TX_MSDU_EXT_INFO0_BUF_PTR_LO	GENMASK(31, 0)
+
+#define HAL_TX_MSDU_EXT_INFO1_BUF_PTR_HI	GENMASK(7, 0)
+#define HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE	BIT(8)
+#define HAL_TX_MSDU_EXT_INFO1_ENCAP_TYPE	GENMASK(10, 9)
+#define HAL_TX_MSDU_EXT_INFO1_ENCRYPT_TYPE	GENMASK(14, 11)
+#define HAL_TX_MSDU_EXT_INFO1_BUF_LEN		GENMASK(31, 16)
+
+struct hal_tx_msdu_ext_desc {
+	__le32 rsvd0[6];
+	__le32 info0;
+	__le32 info1;
+	__le32 rsvd1[10];
+};
+
+struct hal_tcl_gse_cmd {
+	__le32 ctrl_buf_addr_lo;
+	__le32 info0;
+	__le32 meta_data[2];
+	__le32 rsvd0[2];
+	__le32 info1;
+} __packed;
+
+/* hal_tcl_gse_cmd
+ *
+ * ctrl_buf_addr_lo, ctrl_buf_addr_hi
+ *	Address of a control buffer containing additional info needed
+ *	for this command execution.
+ * + * meta_data + * Meta data to be returned in the status descriptor + */ + +enum hal_tcl_cache_op_res { + HAL_TCL_CACHE_OP_RES_DONE, + HAL_TCL_CACHE_OP_RES_NOT_FOUND, + HAL_TCL_CACHE_OP_RES_TIMEOUT, +}; + +struct hal_tcl_status_ring { + __le32 info0; + __le32 msdu_byte_count; + __le32 msdu_timestamp; + __le32 meta_data[2]; + __le32 info1; + __le32 rsvd0; + __le32 info2; +} __packed; + +/* hal_tcl_status_ring + * + * msdu_cnt + * msdu_byte_count + * MSDU count of Entry and MSDU byte count for entry 1. + * + */ + +#define HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI GENMASK(7, 0) +#define HAL_CE_SRC_DESC_ADDR_INFO_HASH_EN BIT(8) +#define HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP BIT(9) +#define HAL_CE_SRC_DESC_ADDR_INFO_DEST_SWAP BIT(10) +#define HAL_CE_SRC_DESC_ADDR_INFO_GATHER BIT(11) +#define HAL_CE_SRC_DESC_ADDR_INFO_LEN GENMASK(31, 16) + +#define HAL_CE_SRC_DESC_META_INFO_DATA GENMASK(15, 0) + +#define HAL_CE_SRC_DESC_FLAGS_RING_ID GENMASK(27, 20) +#define HAL_CE_SRC_DESC_FLAGS_LOOP_CNT HAL_SRNG_DESC_LOOP_CNT + +struct hal_ce_srng_src_desc { + __le32 buffer_addr_low; + __le32 buffer_addr_info; /* %HAL_CE_SRC_DESC_ADDR_INFO_ */ + __le32 meta_info; /* %HAL_CE_SRC_DESC_META_INFO_ */ + __le32 flags; /* %HAL_CE_SRC_DESC_FLAGS_ */ +} __packed; + +/* hal_ce_srng_src_desc + * + * buffer_addr_lo + * LSB 32 bits of the 40 Bit Pointer to the source buffer + * + * buffer_addr_hi + * MSB 8 bits of the 40 Bit Pointer to the source buffer + * + * toeplitz_en + * Enable generation of 32-bit Toeplitz-LFSR hash for + * data transfer. In case of gather field in first source + * ring entry of the gather copy cycle in taken into account. + * + * src_swap + * Treats source memory organization as big-endian. For + * each dword read (4 bytes), the byte 0 is swapped with byte 3 + * and byte 1 is swapped with byte 2. + * In case of gather field in first source ring entry of + * the gather copy cycle in taken into account. + * + * dest_swap + * Treats destination memory organization as big-endian. + * For each dword write (4 bytes), the byte 0 is swapped with + * byte 3 and byte 1 is swapped with byte 2. + * In case of gather field in first source ring entry of + * the gather copy cycle in taken into account. + * + * gather + * Enables gather of multiple copy engine source + * descriptors to one destination. + * + * ce_res_0 + * Reserved + * + * + * length + * Length of the buffer in units of octets of the current + * descriptor + * + * fw_metadata + * Meta data used by FW. + * In case of gather field in first source ring entry of + * the gather copy cycle in taken into account. + * + * ce_res_1 + * Reserved + * + * ce_res_2 + * Reserved + * + * ring_id + * The buffer pointer ring ID. + * 0 refers to the IDLE ring + * 1 - N refers to other rings + * Helps with debugging when dumping ring contents. + * + * looping_count + * A count value that indicates the number of times the + * producer of entries into the Ring has looped around the + * ring. + * + * At initialization time, this value is set to 0. On the + * first loop, this value is set to 1. After the max value is + * reached allowed by the number of bits for this field, the + * count value continues with 0 again. + * + * In case SW is the consumer of the ring entries, it can + * use this field to figure out up to where the producer of + * entries has created new entries. This eliminates the need to + * check where the head pointer' of the ring is located once + * the SW starts processing an interrupt indicating that new + * entries have been put into this ring... 
+ * + * Also note that SW if it wants only needs to look at the + * LSB bit of this count value. + */ + +#define HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI GENMASK(7, 0) +#define HAL_CE_DEST_DESC_ADDR_INFO_RING_ID GENMASK(27, 20) +#define HAL_CE_DEST_DESC_ADDR_INFO_LOOP_CNT HAL_SRNG_DESC_LOOP_CNT + +struct hal_ce_srng_dest_desc { + __le32 buffer_addr_low; + __le32 buffer_addr_info; /* %HAL_CE_DEST_DESC_ADDR_INFO_ */ +} __packed; + +/* hal_ce_srng_dest_desc + * + * dst_buffer_low + * LSB 32 bits of the 40 Bit Pointer to the Destination + * buffer + * + * dst_buffer_high + * MSB 8 bits of the 40 Bit Pointer to the Destination + * buffer + * + * ce_res_4 + * Reserved + * + * ring_id + * The buffer pointer ring ID. + * 0 refers to the IDLE ring + * 1 - N refers to other rings + * Helps with debugging when dumping ring contents. + * + * looping_count + * A count value that indicates the number of times the + * producer of entries into the Ring has looped around the + * ring. + * + * At initialization time, this value is set to 0. On the + * first loop, this value is set to 1. After the max value is + * reached allowed by the number of bits for this field, the + * count value continues with 0 again. + * + * In case SW is the consumer of the ring entries, it can + * use this field to figure out up to where the producer of + * entries has created new entries. This eliminates the need to + * check where the head pointer' of the ring is located once + * the SW starts processing an interrupt indicating that new + * entries have been put into this ring... + * + * Also note that SW if it wants only needs to look at the + * LSB bit of this count value. + */ + +#define HAL_CE_DST_STATUS_DESC_FLAGS_HASH_EN BIT(8) +#define HAL_CE_DST_STATUS_DESC_FLAGS_BYTE_SWAP BIT(9) +#define HAL_CE_DST_STATUS_DESC_FLAGS_DEST_SWAP BIT(10) +#define HAL_CE_DST_STATUS_DESC_FLAGS_GATHER BIT(11) +#define HAL_CE_DST_STATUS_DESC_FLAGS_LEN GENMASK(31, 16) + +#define HAL_CE_DST_STATUS_DESC_META_INFO_DATA GENMASK(15, 0) +#define HAL_CE_DST_STATUS_DESC_META_INFO_RING_ID GENMASK(27, 20) +#define HAL_CE_DST_STATUS_DESC_META_INFO_LOOP_CNT HAL_SRNG_DESC_LOOP_CNT + +struct hal_ce_srng_dst_status_desc { + __le32 flags; /* %HAL_CE_DST_STATUS_DESC_FLAGS_ */ + __le32 toeplitz_hash0; + __le32 toeplitz_hash1; + __le32 meta_info; /* HAL_CE_DST_STATUS_DESC_META_INFO_ */ +} __packed; + +/* hal_ce_srng_dst_status_desc + * + * ce_res_5 + * Reserved + * + * toeplitz_en + * + * src_swap + * Source memory buffer swapped + * + * dest_swap + * Destination memory buffer swapped + * + * gather + * Gather of multiple copy engine source descriptors to one + * destination enabled + * + * ce_res_6 + * Reserved + * + * length + * Sum of all the Lengths of the source descriptor in the + * gather chain + * + * toeplitz_hash_0 + * 32 LS bits of 64 bit Toeplitz LFSR hash result + * + * toeplitz_hash_1 + * 32 MS bits of 64 bit Toeplitz LFSR hash result + * + * fw_metadata + * Meta data used by FW + * In case of gather field in first source ring entry of + * the gather copy cycle in taken into account. + * + * ce_res_7 + * Reserved + * + * ring_id + * The buffer pointer ring ID. + * 0 refers to the IDLE ring + * 1 - N refers to other rings + * Helps with debugging when dumping ring contents. + * + * looping_count + * A count value that indicates the number of times the + * producer of entries into the Ring has looped around the + * ring. + * + * At initialization time, this value is set to 0. On the + * first loop, this value is set to 1. 
After the max value
+ *	allowed by the number of bits for this field is reached,
+ *	the count value continues with 0 again.
+ *
+ *	In case SW is the consumer of the ring entries, it can
+ *	use this field to figure out up to where the producer of
+ *	entries has created new entries. This eliminates the need to
+ *	check where the 'head pointer' of the ring is located once
+ *	the SW starts processing an interrupt indicating that new
+ *	entries have been put into this ring.
+ *
+ *	Also note that SW, if it wants, only needs to look at the
+ *	LSB bit of this count value.
+ */
+
+#define HAL_TX_RATE_STATS_INFO0_VALID		BIT(0)
+#define HAL_TX_RATE_STATS_INFO0_BW		GENMASK(3, 1)
+#define HAL_TX_RATE_STATS_INFO0_PKT_TYPE	GENMASK(7, 4)
+#define HAL_TX_RATE_STATS_INFO0_STBC		BIT(8)
+#define HAL_TX_RATE_STATS_INFO0_LDPC		BIT(9)
+#define HAL_TX_RATE_STATS_INFO0_SGI		GENMASK(11, 10)
+#define HAL_TX_RATE_STATS_INFO0_MCS		GENMASK(15, 12)
+#define HAL_TX_RATE_STATS_INFO0_OFDMA_TX	BIT(16)
+#define HAL_TX_RATE_STATS_INFO0_TONES_IN_RU	GENMASK(28, 17)
+
+enum hal_tx_rate_stats_bw {
+	HAL_TX_RATE_STATS_BW_20,
+	HAL_TX_RATE_STATS_BW_40,
+	HAL_TX_RATE_STATS_BW_80,
+	HAL_TX_RATE_STATS_BW_160,
+};
+
+enum hal_tx_rate_stats_pkt_type {
+	HAL_TX_RATE_STATS_PKT_TYPE_11A,
+	HAL_TX_RATE_STATS_PKT_TYPE_11B,
+	HAL_TX_RATE_STATS_PKT_TYPE_11N,
+	HAL_TX_RATE_STATS_PKT_TYPE_11AC,
+	HAL_TX_RATE_STATS_PKT_TYPE_11AX,
+	HAL_TX_RATE_STATS_PKT_TYPE_11BA,
+	HAL_TX_RATE_STATS_PKT_TYPE_11BE,
+};
+
+enum hal_tx_rate_stats_sgi {
+	HAL_TX_RATE_STATS_SGI_08US,
+	HAL_TX_RATE_STATS_SGI_04US,
+	HAL_TX_RATE_STATS_SGI_16US,
+	HAL_TX_RATE_STATS_SGI_32US,
+};
+
+struct hal_tx_rate_stats {
+	__le32 info0;
+	__le32 tsf;
+} __packed;
+
+struct hal_wbm_link_desc {
+	struct ath12k_buffer_addr buf_addr_info;
+} __packed;
+
+/* hal_wbm_link_desc
+ *
+ *	Producer: WBM
+ *	Consumer: WBM
+ *
+ * buf_addr_info
+ *	Details of the physical address of a buffer or MSDU
+ *	link descriptor.
+ */
+
+enum hal_wbm_rel_src_module {
+	HAL_WBM_REL_SRC_MODULE_TQM,
+	HAL_WBM_REL_SRC_MODULE_RXDMA,
+	HAL_WBM_REL_SRC_MODULE_REO,
+	HAL_WBM_REL_SRC_MODULE_FW,
+	HAL_WBM_REL_SRC_MODULE_SW,
+};
+
+enum hal_wbm_rel_desc_type {
+	HAL_WBM_REL_DESC_TYPE_REL_MSDU,
+	HAL_WBM_REL_DESC_TYPE_MSDU_LINK,
+	HAL_WBM_REL_DESC_TYPE_MPDU_LINK,
+	HAL_WBM_REL_DESC_TYPE_MSDU_EXT,
+	HAL_WBM_REL_DESC_TYPE_QUEUE_EXT,
+};
+
+/* hal_wbm_rel_desc_type
+ *
+ * msdu_buffer
+ *	The address points to an MSDU buffer
+ *
+ * msdu_link_descriptor
+ *	The address points to a Tx MSDU link descriptor
+ *
+ * mpdu_link_descriptor
+ *	The address points to an MPDU link descriptor
+ *
+ * msdu_ext_descriptor
+ *	The address points to an MSDU extension descriptor
+ *
+ * queue_ext_descriptor
+ *	The address points to a TQM queue extension descriptor. WBM should
+ *	treat this in the same way as a link descriptor.
+ */
+
+enum hal_wbm_rel_bm_act {
+	HAL_WBM_REL_BM_ACT_PUT_IN_IDLE,
+	HAL_WBM_REL_BM_ACT_REL_MSDU,
+};
+
+/* hal_wbm_rel_bm_act
+ *
+ * put_in_idle_list
+ *	Put the buffer or descriptor back in the idle list. In case of MSDU or
+ *	MPDU link descriptor, BM does not need to check to release any
+ *	individual MSDU buffers.
+ *
+ * release_msdu_list
+ *	This BM action can only be used in combination with desc_type being
+ *	msdu_link_descriptor. Field first_msdu_index points out which MSDU
+ *	pointer in the MSDU link descriptor is the first of an MPDU that is
+ *	released. BM shall release all the MSDU buffers linked to this first
+ *	MSDU buffer pointer.
All related MSDU buffer pointer entries shall be + * set to value 0, which represents the 'NULL' pointer. When all MSDU + * buffer pointers in the MSDU link descriptor are 'NULL', the MSDU link + * descriptor itself shall also be released. + */ +#define HAL_WBM_COMPL_RX_INFO0_REL_SRC_MODULE GENMASK(2, 0) +#define HAL_WBM_COMPL_RX_INFO0_BM_ACTION GENMASK(5, 3) +#define HAL_WBM_COMPL_RX_INFO0_DESC_TYPE GENMASK(8, 6) +#define HAL_WBM_COMPL_RX_INFO0_RBM GENMASK(12, 9) +#define HAL_WBM_COMPL_RX_INFO0_RXDMA_PUSH_REASON GENMASK(18, 17) +#define HAL_WBM_COMPL_RX_INFO0_RXDMA_ERROR_CODE GENMASK(23, 19) +#define HAL_WBM_COMPL_RX_INFO0_REO_PUSH_REASON GENMASK(25, 24) +#define HAL_WBM_COMPL_RX_INFO0_REO_ERROR_CODE GENMASK(30, 26) +#define HAL_WBM_COMPL_RX_INFO0_WBM_INTERNAL_ERROR BIT(31) + +#define HAL_WBM_COMPL_RX_INFO1_PHY_ADDR_HI GENMASK(7, 0) +#define HAL_WBM_COMPL_RX_INFO1_SW_COOKIE GENMASK(27, 8) +#define HAL_WBM_COMPL_RX_INFO1_LOOPING_COUNT GENMASK(31, 28) + +struct hal_wbm_completion_ring_rx { + __le32 addr_lo; + __le32 addr_hi; + __le32 info0; + struct rx_mpdu_desc rx_mpdu_info; + struct rx_msdu_desc rx_msdu_info; + __le32 phy_addr_lo; + __le32 info1; +} __packed; + +#define HAL_WBM_COMPL_TX_INFO0_REL_SRC_MODULE GENMASK(2, 0) +#define HAL_WBM_COMPL_TX_INFO0_DESC_TYPE GENMASK(8, 6) +#define HAL_WBM_COMPL_TX_INFO0_RBM GENMASK(12, 9) +#define HAL_WBM_COMPL_TX_INFO0_TQM_RELEASE_REASON GENMASK(16, 13) +#define HAL_WBM_COMPL_TX_INFO0_RBM_OVERRIDE_VLD BIT(17) +#define HAL_WBM_COMPL_TX_INFO0_SW_COOKIE_LO GENMASK(29, 18) +#define HAL_WBM_COMPL_TX_INFO0_CC_DONE BIT(30) +#define HAL_WBM_COMPL_TX_INFO0_WBM_INTERNAL_ERROR BIT(31) + +#define HAL_WBM_COMPL_TX_INFO1_TQM_STATUS_NUMBER GENMASK(23, 0) +#define HAL_WBM_COMPL_TX_INFO1_TRANSMIT_COUNT GENMASK(30, 24) +#define HAL_WBM_COMPL_TX_INFO1_SW_REL_DETAILS_VALID BIT(31) + +#define HAL_WBM_COMPL_TX_INFO2_ACK_FRAME_RSSI GENMASK(7, 0) +#define HAL_WBM_COMPL_TX_INFO2_FIRST_MSDU BIT(8) +#define HAL_WBM_COMPL_TX_INFO2_LAST_MSDU BIT(9) +#define HAL_WBM_COMPL_TX_INFO2_FW_TX_NOTIF_FRAME GENMASK(12, 10) +#define HAL_WBM_COMPL_TX_INFO2_BUFFER_TIMESTAMP GENMASK(31, 13) + +#define HAL_WBM_COMPL_TX_INFO3_PEER_ID GENMASK(15, 0) +#define HAL_WBM_COMPL_TX_INFO3_TID GENMASK(19, 16) +#define HAL_WBM_COMPL_TX_INFO3_SW_COOKIE_HI GENMASK(27, 20) +#define HAL_WBM_COMPL_TX_INFO3_LOOPING_COUNT GENMASK(31, 28) + +struct hal_wbm_completion_ring_tx { + __le32 buf_va_lo; + __le32 buf_va_hi; + __le32 info0; + __le32 info1; + __le32 info2; + struct hal_tx_rate_stats rate_stats; + __le32 info3; +} __packed; + +#define HAL_WBM_RELEASE_TX_INFO0_REL_SRC_MODULE GENMASK(2, 0) +#define HAL_WBM_RELEASE_TX_INFO0_BM_ACTION GENMASK(5, 3) +#define HAL_WBM_RELEASE_TX_INFO0_DESC_TYPE GENMASK(8, 6) +#define HAL_WBM_RELEASE_TX_INFO0_FIRST_MSDU_IDX GENMASK(12, 9) +#define HAL_WBM_RELEASE_TX_INFO0_TQM_RELEASE_REASON GENMASK(18, 13) +#define HAL_WBM_RELEASE_TX_INFO0_RBM_OVERRIDE_VLD BIT(17) +#define HAL_WBM_RELEASE_TX_INFO0_SW_BUFFER_COOKIE_11_0 GENMASK(29, 18) +#define HAL_WBM_RELEASE_TX_INFO0_WBM_INTERNAL_ERROR BIT(31) + +#define HAL_WBM_RELEASE_TX_INFO1_TQM_STATUS_NUMBER GENMASK(23, 0) +#define HAL_WBM_RELEASE_TX_INFO1_TRANSMIT_COUNT GENMASK(30, 24) +#define HAL_WBM_RELEASE_TX_INFO1_SW_REL_DETAILS_VALID BIT(31) + +#define HAL_WBM_RELEASE_TX_INFO2_ACK_FRAME_RSSI GENMASK(7, 0) +#define HAL_WBM_RELEASE_TX_INFO2_FIRST_MSDU BIT(8) +#define HAL_WBM_RELEASE_TX_INFO2_LAST_MSDU BIT(9) +#define HAL_WBM_RELEASE_TX_INFO2_FW_TX_NOTIF_FRAME GENMASK(12, 10) +#define HAL_WBM_RELEASE_TX_INFO2_BUFFER_TIMESTAMP GENMASK(31, 
13) + +#define HAL_WBM_RELEASE_TX_INFO3_PEER_ID GENMASK(15, 0) +#define HAL_WBM_RELEASE_TX_INFO3_TID GENMASK(19, 16) +#define HAL_WBM_RELEASE_TX_INFO3_SW_BUFFER_COOKIE_19_12 GENMASK(27, 20) +#define HAL_WBM_RELEASE_TX_INFO3_LOOPING_COUNT GENMASK(31, 28) + +struct hal_wbm_release_ring_tx { + struct ath12k_buffer_addr buf_addr_info; + __le32 info0; + __le32 info1; + __le32 info2; + struct hal_tx_rate_stats rate_stats; + __le32 info3; +} __packed; + +#define HAL_WBM_RELEASE_RX_INFO0_REL_SRC_MODULE GENMASK(2, 0) +#define HAL_WBM_RELEASE_RX_INFO0_BM_ACTION GENMASK(5, 3) +#define HAL_WBM_RELEASE_RX_INFO0_DESC_TYPE GENMASK(8, 6) +#define HAL_WBM_RELEASE_RX_INFO0_FIRST_MSDU_IDX GENMASK(12, 9) +#define HAL_WBM_RELEASE_RX_INFO0_CC_STATUS BIT(16) +#define HAL_WBM_RELEASE_RX_INFO0_RXDMA_PUSH_REASON GENMASK(18, 17) +#define HAL_WBM_RELEASE_RX_INFO0_RXDMA_ERROR_CODE GENMASK(23, 19) +#define HAL_WBM_RELEASE_RX_INFO0_REO_PUSH_REASON GENMASK(25, 24) +#define HAL_WBM_RELEASE_RX_INFO0_REO_ERROR_CODE GENMASK(30, 26) +#define HAL_WBM_RELEASE_RX_INFO0_WBM_INTERNAL_ERROR BIT(31) + +#define HAL_WBM_RELEASE_RX_INFO2_RING_ID GENMASK(27, 20) +#define HAL_WBM_RELEASE_RX_INFO2_LOOPING_COUNT GENMASK(31, 28) + +struct hal_wbm_release_ring_rx { + struct ath12k_buffer_addr buf_addr_info; + __le32 info0; + struct rx_mpdu_desc rx_mpdu_info; + struct rx_msdu_desc rx_msdu_info; + __le32 info1; + __le32 info2; +} __packed; + +#define HAL_WBM_RELEASE_RX_CC_INFO0_RBM GENMASK(12, 9) +#define HAL_WBM_RELEASE_RX_CC_INFO1_COOKIE GENMASK(27, 8) +/* Used when hw cc is success */ +struct hal_wbm_release_ring_cc_rx { + __le32 buf_va_lo; + __le32 buf_va_hi; + __le32 info0; + struct rx_mpdu_desc rx_mpdu_info; + struct rx_msdu_desc rx_msdu_info; + __le32 buf_pa_lo; + __le32 info1; +} __packed; + +#define HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE GENMASK(2, 0) +#define HAL_WBM_RELEASE_INFO0_BM_ACTION GENMASK(5, 3) +#define HAL_WBM_RELEASE_INFO0_DESC_TYPE GENMASK(8, 6) +#define HAL_WBM_RELEASE_INFO0_RXDMA_PUSH_REASON GENMASK(18, 17) +#define HAL_WBM_RELEASE_INFO0_RXDMA_ERROR_CODE GENMASK(23, 19) +#define HAL_WBM_RELEASE_INFO0_REO_PUSH_REASON GENMASK(25, 24) +#define HAL_WBM_RELEASE_INFO0_REO_ERROR_CODE GENMASK(30, 26) +#define HAL_WBM_RELEASE_INFO0_WBM_INTERNAL_ERROR BIT(31) + +#define HAL_WBM_RELEASE_INFO3_FIRST_MSDU BIT(0) +#define HAL_WBM_RELEASE_INFO3_LAST_MSDU BIT(1) +#define HAL_WBM_RELEASE_INFO3_CONTINUATION BIT(2) + +#define HAL_WBM_RELEASE_INFO5_LOOPING_COUNT GENMASK(31, 28) + +struct hal_wbm_release_ring { + struct ath12k_buffer_addr buf_addr_info; + __le32 info0; + __le32 info1; + __le32 info2; + __le32 info3; + __le32 info4; + __le32 info5; +} __packed; + +/* hal_wbm_release_ring + * + * Producer: SW/TQM/RXDMA/REO/SWITCH + * Consumer: WBM/SW/FW + * + * HTT tx status is overlayed on wbm_release ring on 4-byte words 2, 3, 4 and 5 + * for software based completions. + * + * buf_addr_info + * Details of the physical address of the buffer or link descriptor. + * + * release_source_module + * Indicates which module initiated the release of this buffer/descriptor. + * Values are defined in enum %HAL_WBM_REL_SRC_MODULE_. + * + * buffer_or_desc_type + * Field only valid when WBM is marked as the return_buffer_manager in + * the Released_Buffer_address_info. Indicates that type of buffer or + * descriptor is being released. Values are in enum %HAL_WBM_REL_DESC_TYPE. 
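+ *
+ *	For illustration, a consumer could decode these two fields from
+ *	info0 with the masks defined above ('rel_src' and 'desc_type'
+ *	are example names):
+ *
+ *	  rel_src = le32_get_bits(desc->info0,
+ *				  HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE);
+ *	  desc_type = le32_get_bits(desc->info0,
+ *				    HAL_WBM_RELEASE_INFO0_DESC_TYPE);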
+ * + * wbm_internal_error + * Is set when WBM got a buffer pointer but the action was to push it to + * the idle link descriptor ring or do link related activity OR + * Is set when WBM got a link buffer pointer but the action was to push it + * to the buffer descriptor ring. + * + * looping_count + * A count value that indicates the number of times the + * producer of entries into the Buffer Manager Ring has looped + * around the ring. + * + * At initialization time, this value is set to 0. On the + * first loop, this value is set to 1. After the max value is + * reached allowed by the number of bits for this field, the + * count value continues with 0 again. + * + * In case SW is the consumer of the ring entries, it can + * use this field to figure out up to where the producer of + * entries has created new entries. This eliminates the need to + * check where the head pointer' of the ring is located once + * the SW starts processing an interrupt indicating that new + * entries have been put into this ring... + * + * Also note that SW if it wants only needs to look at the + * LSB bit of this count value. + */ + +/** + * enum hal_wbm_tqm_rel_reason - TQM release reason code + * @HAL_WBM_TQM_REL_REASON_FRAME_ACKED: ACK or BACK received for the frame + * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU: Command remove_mpdus initiated by SW + * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX: Command remove transmitted_mpdus + * initiated by sw. + * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_NOTX: Command remove untransmitted_mpdus + * initiated by sw. + * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES: Command remove aged msdus or + * mpdus. + * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON1: Remove command initiated by + * fw with fw_reason1. + * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON2: Remove command initiated by + * fw with fw_reason2. + * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3: Remove command initiated by + * fw with fw_reason3. 
+ */
+enum hal_wbm_tqm_rel_reason {
+	HAL_WBM_TQM_REL_REASON_FRAME_ACKED,
+	HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU,
+	HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX,
+	HAL_WBM_TQM_REL_REASON_CMD_REMOVE_NOTX,
+	HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES,
+	HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON1,
+	HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON2,
+	HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3,
+};
+
+struct hal_wbm_buffer_ring {
+	struct ath12k_buffer_addr buf_addr_info;
+};
+
+enum hal_mon_end_reason {
+	HAL_MON_STATUS_BUFFER_FULL,
+	HAL_MON_FLUSH_DETECTED,
+	HAL_MON_END_OF_PPDU,
+	HAL_MON_PPDU_TRUNCATED,
+};
+
+#define HAL_SW_MONITOR_RING_INFO0_RXDMA_PUSH_REASON	GENMASK(1, 0)
+#define HAL_SW_MONITOR_RING_INFO0_RXDMA_ERROR_CODE	GENMASK(6, 2)
+#define HAL_SW_MONITOR_RING_INFO0_MPDU_FRAGMENT_NUMBER	GENMASK(10, 7)
+#define HAL_SW_MONITOR_RING_INFO0_FRAMELESS_BAR		BIT(11)
+#define HAL_SW_MONITOR_RING_INFO0_STATUS_BUF_COUNT	GENMASK(15, 12)
+#define HAL_SW_MONITOR_RING_INFO0_END_OF_PPDU		BIT(16)
+
+#define HAL_SW_MONITOR_RING_INFO1_PHY_PPDU_ID	GENMASK(15, 0)
+#define HAL_SW_MONITOR_RING_INFO1_RING_ID	GENMASK(27, 20)
+#define HAL_SW_MONITOR_RING_INFO1_LOOPING_COUNT	GENMASK(31, 28)
+
+struct hal_sw_monitor_ring {
+	struct ath12k_buffer_addr buf_addr_info;
+	struct rx_mpdu_desc rx_mpdu_info;
+	struct ath12k_buffer_addr status_buff_addr_info;
+	__le32 info0; /* %HAL_SW_MONITOR_RING_INFO0 */
+	__le32 info1; /* %HAL_SW_MONITOR_RING_INFO1 */
+} __packed;
+
+/* hal_sw_monitor_ring
+ *
+ *	Producer: RXDMA
+ *	Consumer: REO/SW/FW
+ * buf_addr_info
+ *	Details of the physical address of a buffer or MSDU
+ *	link descriptor.
+ *
+ * rx_mpdu_info
+ *	Details related to the MPDU being pushed to SW, valid
+ *	only if end_of_ppdu is set to 0.
+ *
+ * status_buff_addr_info
+ *	Details of the physical address of the first status
+ *	buffer used for the PPDU (either the PPDU that included the
+ *	MPDU being pushed to SW if end_of_ppdu = 0, or the PPDU
+ *	whose end is indicated through end_of_ppdu = 1)
+ *
+ * rxdma_push_reason
+ *	Indicates why RXDMA pushed the frame to this ring
+ *
+ *	<enum 0 rxdma_error_detected> RXDMA detected an error and
+ *	pushed this frame to this queue
+ *
+ *	<enum 1 rxdma_routing_instruction> RXDMA pushed the
+ *	frame to this queue per received routing instructions. No
+ *	error within RXDMA was detected
+ *
+ *	<enum 2 rxdma_rx_flush> RXDMA received an RX_FLUSH. As a
+ *	result the MSDU link descriptor might not have the
+ *	last_msdu_in_mpdu_flag set, but instead WBM might just see a
+ *	NULL pointer in the MSDU link descriptor. This is to be
+ *	considered a normal condition for this scenario.
+ *
+ * rxdma_error_code
+ *	Field only valid when rxdma_push_reason is set to
+ *	'rxdma_error_detected.'
+ *
+ *	<enum 0 rxdma_overflow_err>MPDU frame is not complete
+ *	due to a FIFO overflow error in RXPCU.
+ *
+ *	<enum 1 rxdma_mpdu_length_err>MPDU frame is not complete
+ *	due to receiving incomplete MPDU from the PHY
+ *
+ *	<enum 3 rxdma_decrypt_err>CRYPTO reported a decryption
+ *	error or CRYPTO received an encrypted frame, but did not get
+ *	a valid corresponding key id in the peer entry.
+ *
+ *	<enum 4 rxdma_tkip_mic_err>CRYPTO reported a TKIP MIC
+ *	error
+ *
+ *	<enum 5 rxdma_unecrypted_err>CRYPTO reported an
+ *	unencrypted frame error when encrypted was expected
+ *
+ *	<enum 6 rxdma_msdu_len_err>RX OLE reported an MSDU
+ *	length error
+ *
+ *	<enum 7 rxdma_msdu_limit_err>RX OLE reported that max
+ *	number of MSDUs allowed in an MPDU got exceeded
+ *
+ *	<enum 8 rxdma_wifi_parse_err>RX OLE reported a parsing
+ *	error
+ *
+ *	<enum 9 rxdma_amsdu_parse_err>RX OLE reported an A-MSDU
+ *	parsing error
+ *
+ *	<enum 10 rxdma_sa_timeout_err>RX OLE reported a timeout
+ *	during SA search
+ *
+ *	<enum 11 rxdma_da_timeout_err>RX OLE reported a timeout
+ *	during DA search
+ *
+ *	<enum 12 rxdma_flow_timeout_err>RX OLE reported a
+ *	timeout during flow search
+ *
+ *	<enum 13 rxdma_flush_request>RXDMA received a flush
+ *	request
+ *
+ *	<enum 14 rxdma_amsdu_fragment_err>Rx PCU reported A-MSDU
+ *	present as well as a fragmented MPDU.
+ *
+ * mpdu_fragment_number
+ *	Field only valid when Reo_level_mpdu_frame_info.
+ *	Rx_mpdu_desc_info_details.Fragment_flag is set and
+ *	end_of_ppdu is set to 0.
+ *
+ *	The fragment number from the 802.11 header.
+ *
+ *	Note that the sequence number is embedded in the field:
+ *	Reo_level_mpdu_frame_info. Rx_mpdu_desc_info_details.
+ *	Mpdu_sequence_number
+ *
+ * frameless_bar
+ *	When set, this SW monitor ring struct contains BAR info
+ *	from a multi TID BAR frame. The original multi TID BAR frame
+ *	itself contained all the REO info for the first TID, but all
+ *	the subsequent TID info and their linkage to the REO
+ *	descriptors is passed down as 'frameless' BAR info.
+ *
+ *	The only fields valid in this descriptor when this bit
+ *	is set are within the
+ *
+ *	Reo_level_mpdu_frame_info:
+ *	   Within Rx_mpdu_desc_info_details:
+ *		Mpdu_Sequence_number
+ *		BAR_frame
+ *		Peer_meta_data
+ *	All other fields shall be set to 0.
+ *
+ * status_buf_count
+ *	A count of status buffers used so far for the PPDU
+ *	(either the PPDU that included the MPDU being pushed to SW
+ *	if end_of_ppdu = 0, or the PPDU whose end is indicated
+ *	through end_of_ppdu = 1)
+ *
+ * end_of_ppdu
+ *	Some hw RXDMA can be configured to generate a separate
+ *	'SW_MONITOR_RING' descriptor at the end of a PPDU (either
+ *	through an 'RX_PPDU_END' TLV or through an 'RX_FLUSH') to
+ *	demarcate PPDUs.
+ *
+ *	For such a descriptor, this bit is set to 1 and fields
+ *	Reo_level_mpdu_frame_info, mpdu_fragment_number and
+ *	Frameless_bar are all set to 0.
+ *
+ *	Otherwise this bit is set to 0.
+ *
+ * phy_ppdu_id
+ *	A PPDU counter value that PHY increments for every PPDU
+ *	received
+ *
+ *	The counter value wraps around. Some hw RXDMA can be
+ *	configured to copy this from the RX_PPDU_START TLV for every
+ *	output descriptor.
+ *
+ * ring_id
+ *	For debugging.
+ *	This field is filled in by the SRNG module.
+ *	It helps to identify the ring that is being looked at.
+ *
+ * looping_count
+ *	For debugging.
+ *	This field is filled in by the SRNG module.
+ *
+ *	A count value that indicates the number of times the
+ *	producer of entries into this Ring has looped around the
+ *	ring.
+ *	At initialization time, this value is set to 0. On the
+ *	first loop, this value is set to 1. After the max value
+ *	allowed by the number of bits for this field is reached,
+ *	the count value continues with 0 again.
+ *
+ *	In case SW is the consumer of the ring entries, it can
+ *	use this field to figure out up to where the producer of
+ *	entries has created new entries.
This eliminates the need to + * check where the head pointer' of the ring is located once + * the SW starts processing an interrupt indicating that new + * entries have been put into this ring... + */ + +enum hal_desc_owner { + HAL_DESC_OWNER_WBM, + HAL_DESC_OWNER_SW, + HAL_DESC_OWNER_TQM, + HAL_DESC_OWNER_RXDMA, + HAL_DESC_OWNER_REO, + HAL_DESC_OWNER_SWITCH, +}; + +enum hal_desc_buf_type { + HAL_DESC_BUF_TYPE_TX_MSDU_LINK, + HAL_DESC_BUF_TYPE_TX_MPDU_LINK, + HAL_DESC_BUF_TYPE_TX_MPDU_QUEUE_HEAD, + HAL_DESC_BUF_TYPE_TX_MPDU_QUEUE_EXT, + HAL_DESC_BUF_TYPE_TX_FLOW, + HAL_DESC_BUF_TYPE_TX_BUFFER, + HAL_DESC_BUF_TYPE_RX_MSDU_LINK, + HAL_DESC_BUF_TYPE_RX_MPDU_LINK, + HAL_DESC_BUF_TYPE_RX_REO_QUEUE, + HAL_DESC_BUF_TYPE_RX_REO_QUEUE_EXT, + HAL_DESC_BUF_TYPE_RX_BUFFER, + HAL_DESC_BUF_TYPE_IDLE_LINK, +}; + +#define HAL_DESC_REO_OWNED 4 +#define HAL_DESC_REO_QUEUE_DESC 8 +#define HAL_DESC_REO_QUEUE_EXT_DESC 9 +#define HAL_DESC_REO_NON_QOS_TID 16 + +#define HAL_DESC_HDR_INFO0_OWNER GENMASK(3, 0) +#define HAL_DESC_HDR_INFO0_BUF_TYPE GENMASK(7, 4) +#define HAL_DESC_HDR_INFO0_DBG_RESERVED GENMASK(31, 8) + +struct hal_desc_header { + __le32 info0; +} __packed; + +struct hal_rx_mpdu_link_ptr { + struct ath12k_buffer_addr addr_info; +} __packed; + +struct hal_rx_msdu_details { + struct ath12k_buffer_addr buf_addr_info; + struct rx_msdu_desc rx_msdu_info; + struct rx_msdu_ext_desc rx_msdu_ext_info; +} __packed; + +#define HAL_RX_MSDU_LNK_INFO0_RX_QUEUE_NUMBER GENMASK(15, 0) +#define HAL_RX_MSDU_LNK_INFO0_FIRST_MSDU_LNK BIT(16) + +struct hal_rx_msdu_link { + struct hal_desc_header desc_hdr; + struct ath12k_buffer_addr buf_addr_info; + __le32 info0; + __le32 pn[4]; + struct hal_rx_msdu_details msdu_link[6]; +} __packed; + +struct hal_rx_reo_queue_ext { + struct hal_desc_header desc_hdr; + __le32 rsvd; + struct hal_rx_mpdu_link_ptr mpdu_link[15]; +} __packed; + +/* hal_rx_reo_queue_ext + * Consumer: REO + * Producer: REO + * + * descriptor_header + * Details about which module owns this struct. + * + * mpdu_link + * Pointer to the next MPDU_link descriptor in the MPDU queue. 
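+ *
+ *	As a sketch, SW stamps the header of such a descriptor with the
+ *	REO owner/type constants defined above, mirroring what
+ *	ath12k_hal_reo_set_desc_hdr() in hal_rx.c does:
+ *
+ *	  hdr->info0 =
+ *		le32_encode_bits(HAL_DESC_REO_OWNED,
+ *				 HAL_DESC_HDR_INFO0_OWNER) |
+ *		le32_encode_bits(HAL_DESC_REO_QUEUE_EXT_DESC,
+ *				 HAL_DESC_HDR_INFO0_BUF_TYPE);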
+ */
+
+enum hal_rx_reo_queue_pn_size {
+	HAL_RX_REO_QUEUE_PN_SIZE_24,
+	HAL_RX_REO_QUEUE_PN_SIZE_48,
+	HAL_RX_REO_QUEUE_PN_SIZE_128,
+};
+
+#define HAL_RX_REO_QUEUE_RX_QUEUE_NUMBER		GENMASK(15, 0)
+
+#define HAL_RX_REO_QUEUE_INFO0_VLD			BIT(0)
+#define HAL_RX_REO_QUEUE_INFO0_ASSOC_LNK_DESC_COUNTER	GENMASK(2, 1)
+#define HAL_RX_REO_QUEUE_INFO0_DIS_DUP_DETECTION	BIT(3)
+#define HAL_RX_REO_QUEUE_INFO0_SOFT_REORDER_EN		BIT(4)
+#define HAL_RX_REO_QUEUE_INFO0_AC			GENMASK(6, 5)
+#define HAL_RX_REO_QUEUE_INFO0_BAR			BIT(7)
+#define HAL_RX_REO_QUEUE_INFO0_RETRY			BIT(8)
+#define HAL_RX_REO_QUEUE_INFO0_CHECK_2K_MODE		BIT(9)
+#define HAL_RX_REO_QUEUE_INFO0_OOR_MODE			BIT(10)
+#define HAL_RX_REO_QUEUE_INFO0_BA_WINDOW_SIZE		GENMASK(20, 11)
+#define HAL_RX_REO_QUEUE_INFO0_PN_CHECK			BIT(21)
+#define HAL_RX_REO_QUEUE_INFO0_EVEN_PN			BIT(22)
+#define HAL_RX_REO_QUEUE_INFO0_UNEVEN_PN		BIT(23)
+#define HAL_RX_REO_QUEUE_INFO0_PN_HANDLE_ENABLE		BIT(24)
+#define HAL_RX_REO_QUEUE_INFO0_PN_SIZE			GENMASK(26, 25)
+#define HAL_RX_REO_QUEUE_INFO0_IGNORE_AMPDU_FLG		BIT(27)
+
+#define HAL_RX_REO_QUEUE_INFO1_SVLD		BIT(0)
+#define HAL_RX_REO_QUEUE_INFO1_SSN		GENMASK(12, 1)
+#define HAL_RX_REO_QUEUE_INFO1_CURRENT_IDX	GENMASK(22, 13)
+#define HAL_RX_REO_QUEUE_INFO1_SEQ_2K_ERR	BIT(23)
+#define HAL_RX_REO_QUEUE_INFO1_PN_ERR		BIT(24)
+#define HAL_RX_REO_QUEUE_INFO1_PN_VALID		BIT(31)
+
+#define HAL_RX_REO_QUEUE_INFO2_MPDU_COUNT	GENMASK(6, 0)
+#define HAL_RX_REO_QUEUE_INFO2_MSDU_COUNT	GENMASK(31, 7)
+
+#define HAL_RX_REO_QUEUE_INFO3_TIMEOUT_COUNT		GENMASK(9, 4)
+#define HAL_RX_REO_QUEUE_INFO3_FWD_DUE_TO_BAR_CNT	GENMASK(15, 10)
+#define HAL_RX_REO_QUEUE_INFO3_DUPLICATE_COUNT		GENMASK(31, 16)
+
+#define HAL_RX_REO_QUEUE_INFO4_FRAME_IN_ORD_COUNT	GENMASK(23, 0)
+#define HAL_RX_REO_QUEUE_INFO4_BAR_RECVD_COUNT		GENMASK(31, 24)
+
+#define HAL_RX_REO_QUEUE_INFO5_LATE_RX_MPDU_COUNT	GENMASK(11, 0)
+#define HAL_RX_REO_QUEUE_INFO5_WINDOW_JUMP_2K		GENMASK(15, 12)
+#define HAL_RX_REO_QUEUE_INFO5_HOLE_COUNT		GENMASK(31, 16)
+
+struct hal_rx_reo_queue {
+	struct hal_desc_header desc_hdr;
+	__le32 rx_queue_num;
+	__le32 info0;
+	__le32 info1;
+	__le32 pn[4];
+	__le32 last_rx_enqueue_timestamp;
+	__le32 last_rx_dequeue_timestamp;
+	__le32 next_aging_queue[2];
+	__le32 prev_aging_queue[2];
+	__le32 rx_bitmap[9];
+	__le32 info2;
+	__le32 info3;
+	__le32 info4;
+	__le32 processed_mpdus;
+	__le32 processed_msdus;
+	__le32 processed_total_bytes;
+	__le32 info5;
+	__le32 rsvd[2];
+	struct hal_rx_reo_queue_ext ext_desc[];
+} __packed;
+
+/* hal_rx_reo_queue
+ *
+ * descriptor_header
+ *	Details about which module owns this struct. Note that sub field
+ *	Buffer_type shall be set to receive_reo_queue_descriptor.
+ *
+ * receive_queue_number
+ *	Indicates the MPDU queue ID to which this MPDU link descriptor belongs.
+ *
+ * vld
+ *	Valid bit indicating a session is established and the queue descriptor
+ *	is valid.
+ * associated_link_descriptor_counter
+ *	Indicates which of the 3 link descriptor counters shall be incremented
+ *	or decremented when link descriptors are added or removed from this
+ *	flow queue.
+ * disable_duplicate_detection
+ *	When set, do not perform any duplicate detection.
+ * soft_reorder_enable
+ *	When set, REO has been instructed to not perform the actual re-ordering
+ *	of frames for this queue, but just to insert the reorder opcodes.
+ * ac
+ *	Indicates the access category of the queue descriptor.
+ * bar
+ *	Indicates if BAR has been received.
+ * retry
+ *	Retry bit is checked if this bit is set.
+ * chk_2k_mode
+ *	Indicates what type of operation is expected from Reo when the received
+ *	frame SN falls within the 2K window.
+ * oor_mode
+ *	Indicates what type of operation is expected when the received frame
+ *	falls within the OOR window.
+ * ba_window_size
+ *	Indicates the negotiated (window size + 1). Max of 256 bits.
+ *
+ *	A value 255 means 256 bitmap, 63 means 64 bitmap, 0 (means non-BA
+ *	session, with window size of 0). The 3 values here are the main values
+ *	validated, but other values should work as well.
+ *
+ *	A BA window size of 0 (=> one frame entry bitmap), means that there is
+ *	no additional rx_reo_queue_ext desc. following rx_reo_queue in memory.
+ *	A BA window size of 1 - 105, means that there is 1 rx_reo_queue_ext.
+ *	A BA window size of 106 - 210, means that there are 2 rx_reo_queue_ext.
+ *	A BA window size of 211 - 256, means that there are 3 rx_reo_queue_ext.
+ * pn_check_needed, pn_shall_be_even, pn_shall_be_uneven, pn_handling_enable,
+ * pn_size
+ *	REO shall perform the PN increment check, even number check, uneven
+ *	number check, PN error check and size of the PN field check.
+ * ignore_ampdu_flag
+ *	REO shall ignore the ampdu_flag on entrance descriptor for this queue.
+ *
+ * svld
+ *	Sequence number in next field is valid one.
+ * ssn
+ *	Starting Sequence number of the session.
+ * current_index
+ *	Points to last forwarded packet
+ * seq_2k_error_detected_flag
+ *	REO has detected a 2k error jump in the sequence number and from that
+ *	moment forward, all new frames are forwarded directly to FW, without
+ *	duplicate detect, reordering, etc.
+ * pn_error_detected_flag
+ *	REO has detected a PN error.
+ */
+
+#define HAL_REO_UPD_RX_QUEUE_INFO0_QUEUE_ADDR_HI		GENMASK(7, 0)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RX_QUEUE_NUM		BIT(8)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_VLD			BIT(9)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_ASSOC_LNK_DESC_CNT	BIT(10)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_DIS_DUP_DETECTION	BIT(11)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SOFT_REORDER_EN		BIT(12)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_AC			BIT(13)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BAR			BIT(14)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RETRY			BIT(15)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_CHECK_2K_MODE		BIT(16)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_OOR_MODE			BIT(17)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BA_WINDOW_SIZE		BIT(18)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_CHECK			BIT(19)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_EVEN_PN			BIT(20)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_UNEVEN_PN		BIT(21)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_HANDLE_ENABLE		BIT(22)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_SIZE			BIT(23)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_IGNORE_AMPDU_FLG		BIT(24)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SVLD			BIT(25)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SSN			BIT(26)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SEQ_2K_ERR		BIT(27)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_ERR			BIT(28)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_VALID			BIT(29)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN			BIT(30)
+
+#define HAL_REO_UPD_RX_QUEUE_INFO1_RX_QUEUE_NUMBER		GENMASK(15, 0)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_VLD				BIT(16)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_ASSOC_LNK_DESC_COUNTER	GENMASK(18, 17)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_DIS_DUP_DETECTION		BIT(19)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_SOFT_REORDER_EN		BIT(20)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_AC				GENMASK(22, 21)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_BAR				BIT(23)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_RETRY
BIT(24) +#define HAL_REO_UPD_RX_QUEUE_INFO1_CHECK_2K_MODE BIT(25) +#define HAL_REO_UPD_RX_QUEUE_INFO1_OOR_MODE BIT(26) +#define HAL_REO_UPD_RX_QUEUE_INFO1_PN_CHECK BIT(27) +#define HAL_REO_UPD_RX_QUEUE_INFO1_EVEN_PN BIT(28) +#define HAL_REO_UPD_RX_QUEUE_INFO1_UNEVEN_PN BIT(29) +#define HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE BIT(30) +#define HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG BIT(31) + +#define HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE GENMASK(7, 0) +#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE GENMASK(9, 8) +#define HAL_REO_UPD_RX_QUEUE_INFO2_SVLD BIT(10) +#define HAL_REO_UPD_RX_QUEUE_INFO2_SSN GENMASK(22, 11) +#define HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR BIT(23) +#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR BIT(24) +#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_VALID BIT(25) + +struct hal_reo_update_rx_queue { + struct hal_reo_cmd_hdr cmd; + __le32 queue_addr_lo; + __le32 info0; + __le32 info1; + __le32 info2; + __le32 pn[4]; +} __packed; + +#define HAL_REO_UNBLOCK_CACHE_INFO0_UNBLK_CACHE BIT(0) +#define HAL_REO_UNBLOCK_CACHE_INFO0_RESOURCE_IDX GENMASK(2, 1) + +struct hal_reo_unblock_cache { + struct hal_reo_cmd_hdr cmd; + __le32 info0; + __le32 rsvd[7]; +} __packed; + +enum hal_reo_exec_status { + HAL_REO_EXEC_STATUS_SUCCESS, + HAL_REO_EXEC_STATUS_BLOCKED, + HAL_REO_EXEC_STATUS_FAILED, + HAL_REO_EXEC_STATUS_RESOURCE_BLOCKED, +}; + +#define HAL_REO_STATUS_HDR_INFO0_STATUS_NUM GENMASK(15, 0) +#define HAL_REO_STATUS_HDR_INFO0_EXEC_TIME GENMASK(25, 16) +#define HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS GENMASK(27, 26) + +struct hal_reo_status_hdr { + __le32 info0; + __le32 timestamp; +} __packed; + +/* hal_reo_status_hdr + * Producer: REO + * Consumer: SW + * + * status_num + * The value in this field is equal to value of the reo command + * number. This field helps to correlate the statuses with the REO + * commands. + * + * execution_time (in us) + * The amount of time REO took to excecute the command. Note that + * this time does not include the duration of the command waiting + * in the command ring, before the execution started. + * + * execution_status + * Execution status of the command. Values are defined in + * enum %HAL_REO_EXEC_STATUS_. 
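+ *
+ *	For example, SW may correlate a status with the originating
+ *	command and check the outcome like this (sketch only; local
+ *	names are illustrative):
+ *
+ *	  cmd_num = le32_get_bits(hdr->info0,
+ *				  HAL_REO_STATUS_HDR_INFO0_STATUS_NUM);
+ *	  status = le32_get_bits(hdr->info0,
+ *				 HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS);
+ *	  if (status != HAL_REO_EXEC_STATUS_SUCCESS)
+ *		ath12k_warn(ab, "reo cmd %d failed\n", cmd_num);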
+ */ +#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_SSN GENMASK(11, 0) +#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_CUR_IDX GENMASK(21, 12) + +#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MPDU_COUNT GENMASK(6, 0) +#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MSDU_COUNT GENMASK(31, 7) + +#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_WINDOW_JMP2K GENMASK(3, 0) +#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_TIMEOUT_COUNT GENMASK(9, 4) +#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_FDTB_COUNT GENMASK(15, 10) +#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_DUPLICATE_COUNT GENMASK(31, 16) + +#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_FIO_COUNT GENMASK(23, 0) +#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_BAR_RCVD_CNT GENMASK(31, 24) + +#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_LATE_RX_MPDU GENMASK(11, 0) +#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_HOLE_COUNT GENMASK(27, 12) + +#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO5_LOOPING_CNT GENMASK(31, 28) + +struct hal_reo_get_queue_stats_status { + struct hal_reo_status_hdr hdr; + __le32 info0; + __le32 pn[4]; + __le32 last_rx_enqueue_timestamp; + __le32 last_rx_dequeue_timestamp; + __le32 rx_bitmap[9]; + __le32 info1; + __le32 info2; + __le32 info3; + __le32 num_mpdu_frames; + __le32 num_msdu_frames; + __le32 total_bytes; + __le32 info4; + __le32 info5; +} __packed; + +/* hal_reo_get_queue_stats_status + * Producer: REO + * Consumer: SW + * + * status_hdr + * Details that can link this status with the original command. It + * also contains info on how long REO took to execute this command. + * + * ssn + * Starting Sequence number of the session, this changes whenever + * window moves (can be filled by SW then maintained by REO). + * + * current_index + * Points to last forwarded packet. + * + * pn + * Bits of the PN number. + * + * last_rx_enqueue_timestamp + * last_rx_dequeue_timestamp + * Timestamp of arrival of the last MPDU for this queue and + * Timestamp of forwarding an MPDU accordingly. + * + * rx_bitmap + * When a bit is set, the corresponding frame is currently held + * in the re-order queue. The bitmap is Fully managed by HW. + * + * current_mpdu_count + * current_msdu_count + * The number of MPDUs and MSDUs in the queue. + * + * timeout_count + * The number of times REO started forwarding frames even though + * there is a hole in the bitmap. Forwarding reason is timeout. + * + * forward_due_to_bar_count + * The number of times REO started forwarding frames even though + * there is a hole in the bitmap. Fwd reason is reception of BAR. + * + * duplicate_count + * The number of duplicate frames that have been detected. + * + * frames_in_order_count + * The number of frames that have been received in order (without + * a hole that prevented them from being forwarded immediately). + * + * bar_received_count + * The number of times a BAR frame is received. + * + * mpdu_frames_processed_count + * msdu_frames_processed_count + * The total number of MPDU/MSDU frames that have been processed. + * + * total_bytes + * An approximation of the number of bytes received for this queue. + * + * late_receive_mpdu_count + * The number of MPDUs received after the window had already moved + * on. The 'late' sequence window is defined as + * (Window SSN - 256) - (Window SSN - 1). + * + * window_jump_2k + * The number of times the window moved more than 2K + * + * hole_count + * The number of times a hole was created in the receive bitmap. 
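+ *
+ *	As an example, the MPDU/MSDU counters above can be read from
+ *	info1 with the masks defined before this struct (sketch only):
+ *
+ *	  mpdus = le32_get_bits(desc->info1,
+ *				HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MPDU_COUNT);
+ *	  msdus = le32_get_bits(desc->info1,
+ *				HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MSDU_COUNT);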
+ * + * looping_count + * A count value that indicates the number of times the producer of + * entries into this Ring has looped around the ring. + */ + +#define HAL_REO_STATUS_LOOP_CNT GENMASK(31, 28) + +#define HAL_REO_FLUSH_QUEUE_INFO0_ERR_DETECTED BIT(0) +#define HAL_REO_FLUSH_QUEUE_INFO0_RSVD GENMASK(31, 1) +#define HAL_REO_FLUSH_QUEUE_INFO1_RSVD GENMASK(27, 0) + +struct hal_reo_flush_queue_status { + struct hal_reo_status_hdr hdr; + __le32 info0; + __le32 rsvd0[21]; + __le32 info1; +} __packed; + +/* hal_reo_flush_queue_status + * Producer: REO + * Consumer: SW + * + * status_hdr + * Details that can link this status with the original command. It + * also contains info on how long REO took to execute this command. + * + * error_detected + * Status of blocking resource + * + * 0 - No error has been detected while executing this command + * 1 - Error detected. The resource to be used for blocking was + * already in use. + * + * looping_count + * A count value that indicates the number of times the producer of + * entries into this Ring has looped around the ring. + */ + +#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_IS_ERR BIT(0) +#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_BLOCK_ERR_CODE GENMASK(2, 1) +#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_STATUS_HIT BIT(8) +#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_DESC_TYPE GENMASK(11, 9) +#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_CLIENT_ID GENMASK(15, 12) +#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_ERR GENMASK(17, 16) +#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_COUNT GENMASK(25, 18) + +struct hal_reo_flush_cache_status { + struct hal_reo_status_hdr hdr; + __le32 info0; + __le32 rsvd0[21]; + __le32 info1; +} __packed; + +/* hal_reo_flush_cache_status + * Producer: REO + * Consumer: SW + * + * status_hdr + * Details that can link this status with the original command. It + * also contains info on how long REO took to execute this command. + * + * error_detected + * Status for blocking resource handling + * + * 0 - No error has been detected while executing this command + * 1 - An error in the blocking resource management was detected + * + * block_error_details + * only valid when error_detected is set + * + * 0 - No blocking related errors found + * 1 - Blocking resource is already in use + * 2 - Resource requested to be unblocked, was not blocked + * + * cache_controller_flush_status_hit + * The status that the cache controller returned on executing the + * flush command. + * + * 0 - miss; 1 - hit + * + * cache_controller_flush_status_desc_type + * Flush descriptor type + * + * cache_controller_flush_status_client_id + * Module who made the flush request + * + * In REO, this is always 0 + * + * cache_controller_flush_status_error + * Error condition + * + * 0 - No error found + * 1 - HW interface is still busy + * 2 - Line currently locked. Used for one line flush command + * 3 - At least one line is still locked. + * Used for cache flush command. + * + * cache_controller_flush_count + * The number of lines that were actually flushed out + * + * looping_count + * A count value that indicates the number of times the producer of + * entries into this Ring has looped around the ring. 
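+ *
+ *	For illustration, SW could check whether the flush succeeded and
+ *	how many cache lines were written back (sketch only):
+ *
+ *	  if (le32_get_bits(desc->info0,
+ *			    HAL_REO_FLUSH_CACHE_STATUS_INFO0_IS_ERR))
+ *		num_flushed = 0;
+ *	  else
+ *		num_flushed =
+ *			le32_get_bits(desc->info0,
+ *				      HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_COUNT);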
+ */ + +#define HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_IS_ERR BIT(0) +#define HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_TYPE BIT(1) + +struct hal_reo_unblock_cache_status { + struct hal_reo_status_hdr hdr; + __le32 info0; + __le32 rsvd0[21]; + __le32 info1; +} __packed; + +/* hal_reo_unblock_cache_status + * Producer: REO + * Consumer: SW + * + * status_hdr + * Details that can link this status with the original command. It + * also contains info on how long REO took to execute this command. + * + * error_detected + * 0 - No error has been detected while executing this command + * 1 - The blocking resource was not in use, and therefore it could + * not be unblocked. + * + * unblock_type + * Reference to the type of unblock command + * 0 - Unblock a blocking resource + * 1 - The entire cache usage is unblock + * + * looping_count + * A count value that indicates the number of times the producer of + * entries into this Ring has looped around the ring. + */ + +#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_IS_ERR BIT(0) +#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_LIST_EMPTY BIT(1) + +#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_REL_DESC_COUNT GENMASK(15, 0) +#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_FWD_BUF_COUNT GENMASK(31, 16) + +struct hal_reo_flush_timeout_list_status { + struct hal_reo_status_hdr hdr; + __le32 info0; + __le32 info1; + __le32 rsvd0[20]; + __le32 info2; +} __packed; + +/* hal_reo_flush_timeout_list_status + * Producer: REO + * Consumer: SW + * + * status_hdr + * Details that can link this status with the original command. It + * also contains info on how long REO took to execute this command. + * + * error_detected + * 0 - No error has been detected while executing this command + * 1 - Command not properly executed and returned with error + * + * timeout_list_empty + * When set, REO has depleted the timeout list and all entries are + * gone. + * + * release_desc_count + * Producer: SW; Consumer: REO + * The number of link descriptor released + * + * forward_buf_count + * Producer: SW; Consumer: REO + * The number of buffers forwarded to the REO destination rings + * + * looping_count + * A count value that indicates the number of times the producer of + * entries into this Ring has looped around the ring. + */ + +#define HAL_REO_DESC_THRESH_STATUS_INFO0_THRESH_INDEX GENMASK(1, 0) +#define HAL_REO_DESC_THRESH_STATUS_INFO1_LINK_DESC_COUNTER0 GENMASK(23, 0) +#define HAL_REO_DESC_THRESH_STATUS_INFO2_LINK_DESC_COUNTER1 GENMASK(23, 0) +#define HAL_REO_DESC_THRESH_STATUS_INFO3_LINK_DESC_COUNTER2 GENMASK(23, 0) +#define HAL_REO_DESC_THRESH_STATUS_INFO4_LINK_DESC_COUNTER_SUM GENMASK(25, 0) + +struct hal_reo_desc_thresh_reached_status { + struct hal_reo_status_hdr hdr; + __le32 info0; + __le32 info1; + __le32 info2; + __le32 info3; + __le32 info4; + __le32 rsvd0[17]; + __le32 info5; +} __packed; + +/* hal_reo_desc_thresh_reached_status + * Producer: REO + * Consumer: SW + * + * status_hdr + * Details that can link this status with the original command. It + * also contains info on how long REO took to execute this command. + * + * threshold_index + * The index of the threshold register whose value got reached + * + * link_descriptor_counter0 + * link_descriptor_counter1 + * link_descriptor_counter2 + * link_descriptor_counter_sum + * Value of the respective counters at generation of this message + * + * looping_count + * A count value that indicates the number of times the producer of + * entries into this Ring has looped around the ring. 
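+ *
+ *	For illustration, the individual counters can be extracted with
+ *	the masks defined above (sketch only):
+ *
+ *	  cnt0 = le32_get_bits(desc->info1,
+ *			       HAL_REO_DESC_THRESH_STATUS_INFO1_LINK_DESC_COUNTER0);
+ *	  sum = le32_get_bits(desc->info4,
+ *			      HAL_REO_DESC_THRESH_STATUS_INFO4_LINK_DESC_COUNTER_SUM);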
+ */
+
+#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_DATA_LENGTH	GENMASK(13, 0)
+#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_L4_CSUM_STATUS	BIT(14)
+#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_L3_CSUM_STATUS	BIT(15)
+#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_PID		GENMASK(27, 24)
+#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_QDISC		BIT(28)
+#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_MULTICAST		BIT(29)
+#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_MORE		BIT(30)
+#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_VALID_TOGGLE	BIT(31)
+
+struct hal_tcl_entrance_from_ppe_ring {
+	__le32 buffer_addr;
+	__le32 info0;
+} __packed;
+
+struct hal_mon_buf_ring {
+	__le32 paddr_lo;
+	__le32 paddr_hi;
+	__le64 cookie;
+};
+
+/* hal_mon_buf_ring
+ *	Producer : SW
+ *	Consumer : Monitor
+ *
+ * paddr_lo
+ *	Lower 32-bit physical address of the buffer pointer from the source ring.
+ * paddr_hi
+ *	bit range 7-0 : upper 8 bits of the physical address.
+ *	bit range 31-8 : reserved.
+ * cookie
+ *	Consumer: RxMon/TxMon 64-bit cookie of the buffers.
+ */
+
+#define HAL_MON_DEST_COOKIE_BUF_ID	GENMASK(17, 0)
+
+#define HAL_MON_DEST_INFO0_END_OFFSET		GENMASK(15, 0)
+#define HAL_MON_DEST_INFO0_FLUSH_DETECTED	BIT(16)
+#define HAL_MON_DEST_INFO0_END_OF_PPDU		BIT(17)
+#define HAL_MON_DEST_INFO0_INITIATOR		BIT(18)
+#define HAL_MON_DEST_INFO0_EMPTY_DESC		BIT(19)
+#define HAL_MON_DEST_INFO0_RING_ID		GENMASK(27, 20)
+#define HAL_MON_DEST_INFO0_LOOPING_COUNT	GENMASK(31, 28)
+
+struct hal_mon_dest_desc {
+	__le32 cookie;
+	__le32 reserved;
+	__le32 ppdu_id;
+	__le32 info0;
+};
+
+/* hal_mon_dest_ring
+ *	Producer : TxMon/RxMon
+ *	Consumer : SW
+ * cookie
+ *	bits 0-17: buf_id to track the skb's vaddr.
+ * ppdu_id
+ *	Phy ppdu_id
+ * end_offset
+ *	The offset into the status buffer where DMA ended, i.e., the offset
+ *	to the last TLV + the last TLV size.
+ * flush_detected
+ *	Indicates whether 'tx_flush' or 'rx_flush' occurred.
+ * end_of_ppdu
+ *	Indicates end of ppdu.
+ * pmac_id
+ *	Indicates the PMAC that received the frame.
+ * empty_descriptor
+ *	This descriptor is written on flush or end of ppdu or end of status
+ *	buffer.
+ * ring_id
+ *	updated by SRNG.
+ * looping_count
+ *	updated by SRNG.
+ */
+
+#endif /* ATH12K_HAL_DESC_H */
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.c b/drivers/net/wireless/ath/ath12k/hal_rx.c
new file mode 100644
index 000000000000..ee61a6462fdc
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/hal_rx.c
@@ -0,0 +1,850 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */ + +#include "debug.h" +#include "hal.h" +#include "hal_tx.h" +#include "hal_rx.h" +#include "hal_desc.h" +#include "hif.h" + +static void ath12k_hal_reo_set_desc_hdr(struct hal_desc_header *hdr, + u8 owner, u8 buffer_type, u32 magic) +{ + hdr->info0 = le32_encode_bits(owner, HAL_DESC_HDR_INFO0_OWNER) | + le32_encode_bits(buffer_type, HAL_DESC_HDR_INFO0_BUF_TYPE); + + /* Magic pattern in reserved bits for debugging */ + hdr->info0 |= le32_encode_bits(magic, HAL_DESC_HDR_INFO0_DBG_RESERVED); +} + +static int ath12k_hal_reo_cmd_queue_stats(struct hal_tlv_64_hdr *tlv, + struct ath12k_hal_reo_cmd *cmd) +{ + struct hal_reo_get_queue_stats *desc; + + tlv->tl = u32_encode_bits(HAL_REO_GET_QUEUE_STATS, HAL_TLV_HDR_TAG) | + u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN); + + desc = (struct hal_reo_get_queue_stats *)tlv->value; + memset_startat(desc, 0, queue_addr_lo); + + desc->cmd.info0 &= ~cpu_to_le32(HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED); + if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS) + desc->cmd.info0 |= cpu_to_le32(HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED); + + desc->queue_addr_lo = cpu_to_le32(cmd->addr_lo); + desc->info0 = le32_encode_bits(cmd->addr_hi, + HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI); + if (cmd->flag & HAL_REO_CMD_FLG_STATS_CLEAR) + desc->info0 |= cpu_to_le32(HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS); + + return le32_get_bits(desc->cmd.info0, HAL_REO_CMD_HDR_INFO0_CMD_NUMBER); +} + +static int ath12k_hal_reo_cmd_flush_cache(struct ath12k_hal *hal, + struct hal_tlv_64_hdr *tlv, + struct ath12k_hal_reo_cmd *cmd) +{ + struct hal_reo_flush_cache *desc; + u8 avail_slot = ffz(hal->avail_blk_resource); + + if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) { + if (avail_slot >= HAL_MAX_AVAIL_BLK_RES) + return -ENOSPC; + + hal->current_blk_index = avail_slot; + } + + tlv->tl = u32_encode_bits(HAL_REO_FLUSH_CACHE, HAL_TLV_HDR_TAG) | + u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN); + + desc = (struct hal_reo_flush_cache *)tlv->value; + memset_startat(desc, 0, cache_addr_lo); + + desc->cmd.info0 &= ~cpu_to_le32(HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED); + if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS) + desc->cmd.info0 |= cpu_to_le32(HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED); + + desc->cache_addr_lo = cpu_to_le32(cmd->addr_lo); + desc->info0 = le32_encode_bits(cmd->addr_hi, + HAL_REO_FLUSH_CACHE_INFO0_CACHE_ADDR_HI); + + if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS) + desc->info0 |= cpu_to_le32(HAL_REO_FLUSH_CACHE_INFO0_FWD_ALL_MPDUS); + + if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) { + desc->info0 |= cpu_to_le32(HAL_REO_FLUSH_CACHE_INFO0_BLOCK_CACHE_USAGE); + desc->info0 |= + le32_encode_bits(avail_slot, + HAL_REO_FLUSH_CACHE_INFO0_BLOCK_RESRC_IDX); + } + + if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_NO_INVAL) + desc->info0 |= cpu_to_le32(HAL_REO_FLUSH_CACHE_INFO0_FLUSH_WO_INVALIDATE); + + if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_ALL) + desc->info0 |= cpu_to_le32(HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL); + + return le32_get_bits(desc->cmd.info0, HAL_REO_CMD_HDR_INFO0_CMD_NUMBER); +} + +static int ath12k_hal_reo_cmd_update_rx_queue(struct hal_tlv_64_hdr *tlv, + struct ath12k_hal_reo_cmd *cmd) +{ + struct hal_reo_update_rx_queue *desc; + + tlv->tl = u32_encode_bits(HAL_REO_UPDATE_RX_REO_QUEUE, HAL_TLV_HDR_TAG) | + u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN); + + desc = (struct hal_reo_update_rx_queue *)tlv->value; + memset_startat(desc, 0, queue_addr_lo); + + desc->cmd.info0 &= ~cpu_to_le32(HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED); + if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS) + 
desc->cmd.info0 |= cpu_to_le32(HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED); + + desc->queue_addr_lo = cpu_to_le32(cmd->addr_lo); + desc->info0 = + le32_encode_bits(cmd->addr_hi, + HAL_REO_UPD_RX_QUEUE_INFO0_QUEUE_ADDR_HI) | + le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_RX_QUEUE_NUM), + HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RX_QUEUE_NUM) | + le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_VLD), + HAL_REO_UPD_RX_QUEUE_INFO0_UPD_VLD) | + le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_ALDC), + HAL_REO_UPD_RX_QUEUE_INFO0_UPD_ASSOC_LNK_DESC_CNT) | + le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_DIS_DUP_DETECTION), + HAL_REO_UPD_RX_QUEUE_INFO0_UPD_DIS_DUP_DETECTION) | + le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_SOFT_REORDER_EN), + HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SOFT_REORDER_EN) | + le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_AC), + HAL_REO_UPD_RX_QUEUE_INFO0_UPD_AC) | + le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_BAR), + HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BAR) | + le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_RETRY), + HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RETRY) | + le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_CHECK_2K_MODE), + HAL_REO_UPD_RX_QUEUE_INFO0_UPD_CHECK_2K_MODE) | + le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_OOR_MODE), + HAL_REO_UPD_RX_QUEUE_INFO0_UPD_OOR_MODE) | + le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_BA_WINDOW_SIZE), + HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BA_WINDOW_SIZE) | + le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_CHECK), + HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_CHECK) | + le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_EVEN_PN), + HAL_REO_UPD_RX_QUEUE_INFO0_UPD_EVEN_PN) | + le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_UNEVEN_PN), + HAL_REO_UPD_RX_QUEUE_INFO0_UPD_UNEVEN_PN) | + le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE), + HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_HANDLE_ENABLE) | + le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_SIZE), + HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_SIZE) | + le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG), + HAL_REO_UPD_RX_QUEUE_INFO0_UPD_IGNORE_AMPDU_FLG) | + le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_SVLD), + HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SVLD) | + le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_SSN), + HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SSN) | + le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_SEQ_2K_ERR), + HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SEQ_2K_ERR) | + le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_VALID), + HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_VALID) | + le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_PN), + HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN); + + desc->info1 = + le32_encode_bits(cmd->rx_queue_num, + HAL_REO_UPD_RX_QUEUE_INFO1_RX_QUEUE_NUMBER) | + le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_VLD), + HAL_REO_UPD_RX_QUEUE_INFO1_VLD) | + le32_encode_bits(u32_get_bits(cmd->upd1, HAL_REO_CMD_UPD1_ALDC), + HAL_REO_UPD_RX_QUEUE_INFO1_ASSOC_LNK_DESC_COUNTER) | + le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_DIS_DUP_DETECTION), + HAL_REO_UPD_RX_QUEUE_INFO1_DIS_DUP_DETECTION) | + le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_SOFT_REORDER_EN), + HAL_REO_UPD_RX_QUEUE_INFO1_SOFT_REORDER_EN) | + le32_encode_bits(u32_get_bits(cmd->upd1, HAL_REO_CMD_UPD1_AC), + HAL_REO_UPD_RX_QUEUE_INFO1_AC) | + le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_BAR), + HAL_REO_UPD_RX_QUEUE_INFO1_BAR) | + le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_CHECK_2K_MODE), + HAL_REO_UPD_RX_QUEUE_INFO1_CHECK_2K_MODE) | + le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_RETRY), + 
HAL_REO_UPD_RX_QUEUE_INFO1_RETRY) | + le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_OOR_MODE), + HAL_REO_UPD_RX_QUEUE_INFO1_OOR_MODE) | + le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_CHECK), + HAL_REO_UPD_RX_QUEUE_INFO1_PN_CHECK) | + le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_EVEN_PN), + HAL_REO_UPD_RX_QUEUE_INFO1_EVEN_PN) | + le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_UNEVEN_PN), + HAL_REO_UPD_RX_QUEUE_INFO1_UNEVEN_PN) | + le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE), + HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE) | + le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG), + HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG); + + if (cmd->pn_size == 24) + cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_24; + else if (cmd->pn_size == 48) + cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_48; + else if (cmd->pn_size == 128) + cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_128; + + if (cmd->ba_window_size < 1) + cmd->ba_window_size = 1; + + if (cmd->ba_window_size == 1) + cmd->ba_window_size++; + + desc->info2 = + le32_encode_bits(cmd->ba_window_size - 1, + HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE) | + le32_encode_bits(cmd->pn_size, HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE) | + le32_encode_bits(!!(cmd->upd2 & HAL_REO_CMD_UPD2_SVLD), + HAL_REO_UPD_RX_QUEUE_INFO2_SVLD) | + le32_encode_bits(u32_get_bits(cmd->upd2, HAL_REO_CMD_UPD2_SSN), + HAL_REO_UPD_RX_QUEUE_INFO2_SSN) | + le32_encode_bits(!!(cmd->upd2 & HAL_REO_CMD_UPD2_SEQ_2K_ERR), + HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR) | + le32_encode_bits(!!(cmd->upd2 & HAL_REO_CMD_UPD2_PN_ERR), + HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR); + + return le32_get_bits(desc->cmd.info0, HAL_REO_CMD_HDR_INFO0_CMD_NUMBER); +} + +int ath12k_hal_reo_cmd_send(struct ath12k_base *ab, struct hal_srng *srng, + enum hal_reo_cmd_type type, + struct ath12k_hal_reo_cmd *cmd) +{ + struct hal_tlv_64_hdr *reo_desc; + int ret; + + spin_lock_bh(&srng->lock); + + ath12k_hal_srng_access_begin(ab, srng); + reo_desc = ath12k_hal_srng_src_get_next_entry(ab, srng); + if (!reo_desc) { + ret = -ENOBUFS; + goto out; + } + + switch (type) { + case HAL_REO_CMD_GET_QUEUE_STATS: + ret = ath12k_hal_reo_cmd_queue_stats(reo_desc, cmd); + break; + case HAL_REO_CMD_FLUSH_CACHE: + ret = ath12k_hal_reo_cmd_flush_cache(&ab->hal, reo_desc, cmd); + break; + case HAL_REO_CMD_UPDATE_RX_QUEUE: + ret = ath12k_hal_reo_cmd_update_rx_queue(reo_desc, cmd); + break; + case HAL_REO_CMD_FLUSH_QUEUE: + case HAL_REO_CMD_UNBLOCK_CACHE: + case HAL_REO_CMD_FLUSH_TIMEOUT_LIST: + ath12k_warn(ab, "Unsupported reo command %d\n", type); + ret = -ENOTSUPP; + break; + default: + ath12k_warn(ab, "Unknown reo command %d\n", type); + ret = -EINVAL; + break; + } + +out: + ath12k_hal_srng_access_end(ab, srng); + spin_unlock_bh(&srng->lock); + + return ret; +} + +void ath12k_hal_rx_buf_addr_info_set(struct ath12k_buffer_addr *binfo, + dma_addr_t paddr, u32 cookie, u8 manager) +{ + u32 paddr_lo, paddr_hi; + + paddr_lo = lower_32_bits(paddr); + paddr_hi = upper_32_bits(paddr); + binfo->info0 = le32_encode_bits(paddr_lo, BUFFER_ADDR_INFO0_ADDR); + binfo->info1 = le32_encode_bits(paddr_hi, BUFFER_ADDR_INFO1_ADDR) | + le32_encode_bits(cookie, BUFFER_ADDR_INFO1_SW_COOKIE) | + le32_encode_bits(manager, BUFFER_ADDR_INFO1_RET_BUF_MGR); +} + +void ath12k_hal_rx_buf_addr_info_get(struct ath12k_buffer_addr *binfo, + dma_addr_t *paddr, + u32 *cookie, u8 *rbm) +{ + *paddr = (((u64)le32_get_bits(binfo->info1, BUFFER_ADDR_INFO1_ADDR)) << 32) | + le32_get_bits(binfo->info0, BUFFER_ADDR_INFO0_ADDR); + *cookie = 
le32_get_bits(binfo->info1, BUFFER_ADDR_INFO1_SW_COOKIE); + *rbm = le32_get_bits(binfo->info1, BUFFER_ADDR_INFO1_RET_BUF_MGR); +} + +void ath12k_hal_rx_msdu_link_info_get(struct hal_rx_msdu_link *link, u32 *num_msdus, + u32 *msdu_cookies, + enum hal_rx_buf_return_buf_manager *rbm) +{ + struct hal_rx_msdu_details *msdu; + u32 val; + int i; + + *num_msdus = HAL_NUM_RX_MSDUS_PER_LINK_DESC; + + msdu = &link->msdu_link[0]; + *rbm = le32_get_bits(msdu->buf_addr_info.info1, + BUFFER_ADDR_INFO1_RET_BUF_MGR); + + for (i = 0; i < *num_msdus; i++) { + msdu = &link->msdu_link[i]; + + val = le32_get_bits(msdu->buf_addr_info.info0, + BUFFER_ADDR_INFO0_ADDR); + if (val == 0) { + *num_msdus = i; + break; + } + *msdu_cookies = le32_get_bits(msdu->buf_addr_info.info1, + BUFFER_ADDR_INFO1_SW_COOKIE); + msdu_cookies++; + } +} + +int ath12k_hal_desc_reo_parse_err(struct ath12k_base *ab, + struct hal_reo_dest_ring *desc, + dma_addr_t *paddr, u32 *desc_bank) +{ + enum hal_reo_dest_ring_push_reason push_reason; + enum hal_reo_dest_ring_error_code err_code; + u32 cookie, val; + + push_reason = le32_get_bits(desc->info0, + HAL_REO_DEST_RING_INFO0_PUSH_REASON); + err_code = le32_get_bits(desc->info0, + HAL_REO_DEST_RING_INFO0_ERROR_CODE); + ab->soc_stats.reo_error[err_code]++; + + if (push_reason != HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED && + push_reason != HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) { + ath12k_warn(ab, "expected error push reason code, received %d\n", + push_reason); + return -EINVAL; + } + + val = le32_get_bits(desc->info0, HAL_REO_DEST_RING_INFO0_BUFFER_TYPE); + if (val != HAL_REO_DEST_RING_BUFFER_TYPE_LINK_DESC) { + ath12k_warn(ab, "expected buffer type link_desc"); + return -EINVAL; + } + + ath12k_hal_rx_reo_ent_paddr_get(ab, &desc->buf_addr_info, paddr, &cookie); + *desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK); + + return 0; +} + +int ath12k_hal_wbm_desc_parse_err(struct ath12k_base *ab, void *desc, + struct hal_rx_wbm_rel_info *rel_info) +{ + struct hal_wbm_release_ring *wbm_desc = desc; + struct hal_wbm_release_ring_cc_rx *wbm_cc_desc = desc; + enum hal_wbm_rel_desc_type type; + enum hal_wbm_rel_src_module rel_src; + bool hw_cc_done; + u64 desc_va; + u32 val; + + type = le32_get_bits(wbm_desc->info0, HAL_WBM_RELEASE_INFO0_DESC_TYPE); + /* We expect only WBM_REL buffer type */ + if (type != HAL_WBM_REL_DESC_TYPE_REL_MSDU) { + WARN_ON(1); + return -EINVAL; + } + + rel_src = le32_get_bits(wbm_desc->info0, + HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE); + if (rel_src != HAL_WBM_REL_SRC_MODULE_RXDMA && + rel_src != HAL_WBM_REL_SRC_MODULE_REO) + return -EINVAL; + + /* The format of wbm rel ring desc changes based on the + * hw cookie conversion status + */ + hw_cc_done = le32_get_bits(wbm_desc->info0, + HAL_WBM_RELEASE_RX_INFO0_CC_STATUS); + + if (!hw_cc_done) { + val = le32_get_bits(wbm_desc->buf_addr_info.info1, + BUFFER_ADDR_INFO1_RET_BUF_MGR); + if (val != HAL_RX_BUF_RBM_SW3_BM) { + ab->soc_stats.invalid_rbm++; + return -EINVAL; + } + + rel_info->cookie = le32_get_bits(wbm_desc->buf_addr_info.info1, + BUFFER_ADDR_INFO1_SW_COOKIE); + + rel_info->rx_desc = NULL; + } else { + val = le32_get_bits(wbm_cc_desc->info0, + HAL_WBM_RELEASE_RX_CC_INFO0_RBM); + if (val != HAL_RX_BUF_RBM_SW3_BM) { + ab->soc_stats.invalid_rbm++; + return -EINVAL; + } + + rel_info->cookie = le32_get_bits(wbm_cc_desc->info1, + HAL_WBM_RELEASE_RX_CC_INFO1_COOKIE); + + desc_va = ((u64)le32_to_cpu(wbm_cc_desc->buf_va_hi) << 32 | + le32_to_cpu(wbm_cc_desc->buf_va_lo)); + rel_info->rx_desc = + (struct 
ath12k_rx_desc_info *)((unsigned long)desc_va); + } + + rel_info->err_rel_src = rel_src; + rel_info->hw_cc_done = hw_cc_done; + + rel_info->first_msdu = le32_get_bits(wbm_desc->info3, + HAL_WBM_RELEASE_INFO3_FIRST_MSDU); + rel_info->last_msdu = le32_get_bits(wbm_desc->info3, + HAL_WBM_RELEASE_INFO3_LAST_MSDU); + rel_info->continuation = le32_get_bits(wbm_desc->info3, + HAL_WBM_RELEASE_INFO3_CONTINUATION); + + if (rel_info->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO) { + rel_info->push_reason = + le32_get_bits(wbm_desc->info0, + HAL_WBM_RELEASE_INFO0_REO_PUSH_REASON); + rel_info->err_code = + le32_get_bits(wbm_desc->info0, + HAL_WBM_RELEASE_INFO0_REO_ERROR_CODE); + } else { + rel_info->push_reason = + le32_get_bits(wbm_desc->info0, + HAL_WBM_RELEASE_INFO0_RXDMA_PUSH_REASON); + rel_info->err_code = + le32_get_bits(wbm_desc->info0, + HAL_WBM_RELEASE_INFO0_RXDMA_ERROR_CODE); + } + + return 0; +} + +void ath12k_hal_rx_reo_ent_paddr_get(struct ath12k_base *ab, + struct ath12k_buffer_addr *buff_addr, + dma_addr_t *paddr, u32 *cookie) +{ + *paddr = ((u64)(le32_get_bits(buff_addr->info1, + BUFFER_ADDR_INFO1_ADDR)) << 32) | + le32_get_bits(buff_addr->info0, BUFFER_ADDR_INFO0_ADDR); + + *cookie = le32_get_bits(buff_addr->info1, BUFFER_ADDR_INFO1_SW_COOKIE); +} + +void ath12k_hal_rx_msdu_link_desc_set(struct ath12k_base *ab, + struct hal_wbm_release_ring *dst_desc, + struct hal_wbm_release_ring *src_desc, + enum hal_wbm_rel_bm_act action) +{ + dst_desc->buf_addr_info = src_desc->buf_addr_info; + dst_desc->info0 |= le32_encode_bits(HAL_WBM_REL_SRC_MODULE_SW, + HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE) | + le32_encode_bits(action, HAL_WBM_RELEASE_INFO0_BM_ACTION) | + le32_encode_bits(HAL_WBM_REL_DESC_TYPE_MSDU_LINK, + HAL_WBM_RELEASE_INFO0_DESC_TYPE); +} + +void ath12k_hal_reo_status_queue_stats(struct ath12k_base *ab, struct hal_tlv_64_hdr *tlv, + struct hal_reo_status *status) +{ + struct hal_reo_get_queue_stats_status *desc = + (struct hal_reo_get_queue_stats_status *)tlv->value; + + status->uniform_hdr.cmd_num = + le32_get_bits(desc->hdr.info0, + HAL_REO_STATUS_HDR_INFO0_STATUS_NUM); + status->uniform_hdr.cmd_status = + le32_get_bits(desc->hdr.info0, + HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS); + + ath12k_dbg(ab, ATH12K_DBG_HAL, "Queue stats status:\n"); + ath12k_dbg(ab, ATH12K_DBG_HAL, "header: cmd_num %d status %d\n", + status->uniform_hdr.cmd_num, + status->uniform_hdr.cmd_status); + ath12k_dbg(ab, ATH12K_DBG_HAL, "ssn %u cur_idx %u\n", + le32_get_bits(desc->info0, + HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_SSN), + le32_get_bits(desc->info0, + HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_CUR_IDX)); + ath12k_dbg(ab, ATH12K_DBG_HAL, "pn = [%08x, %08x, %08x, %08x]\n", + desc->pn[0], desc->pn[1], desc->pn[2], desc->pn[3]); + ath12k_dbg(ab, ATH12K_DBG_HAL, "last_rx: enqueue_tstamp %08x dequeue_tstamp %08x\n", + desc->last_rx_enqueue_timestamp, + desc->last_rx_dequeue_timestamp); + ath12k_dbg(ab, ATH12K_DBG_HAL, "rx_bitmap [%08x %08x %08x %08x %08x %08x %08x %08x]\n", + desc->rx_bitmap[0], desc->rx_bitmap[1], desc->rx_bitmap[2], + desc->rx_bitmap[3], desc->rx_bitmap[4], desc->rx_bitmap[5], + desc->rx_bitmap[6], desc->rx_bitmap[7]); + ath12k_dbg(ab, ATH12K_DBG_HAL, "count: cur_mpdu %u cur_msdu %u\n", + le32_get_bits(desc->info1, + HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MPDU_COUNT), + le32_get_bits(desc->info1, + HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MSDU_COUNT)); + ath12k_dbg(ab, ATH12K_DBG_HAL, "fwd_timeout %u fwd_bar %u dup_count %u\n", + le32_get_bits(desc->info2, + HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_TIMEOUT_COUNT), + 
le32_get_bits(desc->info2, + HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_FDTB_COUNT), + le32_get_bits(desc->info2, + HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_DUPLICATE_COUNT)); + ath12k_dbg(ab, ATH12K_DBG_HAL, "frames_in_order %u bar_rcvd %u\n", + le32_get_bits(desc->info3, + HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_FIO_COUNT), + le32_get_bits(desc->info3, + HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_BAR_RCVD_CNT)); + ath12k_dbg(ab, ATH12K_DBG_HAL, "num_mpdus %d num_msdus %d total_bytes %d\n", + desc->num_mpdu_frames, desc->num_msdu_frames, + desc->total_bytes); + ath12k_dbg(ab, ATH12K_DBG_HAL, "late_rcvd %u win_jump_2k %u hole_cnt %u\n", + le32_get_bits(desc->info4, + HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_LATE_RX_MPDU), + le32_get_bits(desc->info2, + HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_WINDOW_JMP2K), + le32_get_bits(desc->info4, + HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_HOLE_COUNT)); + ath12k_dbg(ab, ATH12K_DBG_HAL, "looping count %u\n", + le32_get_bits(desc->info5, + HAL_REO_GET_QUEUE_STATS_STATUS_INFO5_LOOPING_CNT)); +} + +void ath12k_hal_reo_flush_queue_status(struct ath12k_base *ab, struct hal_tlv_64_hdr *tlv, + struct hal_reo_status *status) +{ + struct hal_reo_flush_queue_status *desc = + (struct hal_reo_flush_queue_status *)tlv->value; + + status->uniform_hdr.cmd_num = + le32_get_bits(desc->hdr.info0, + HAL_REO_STATUS_HDR_INFO0_STATUS_NUM); + status->uniform_hdr.cmd_status = + le32_get_bits(desc->hdr.info0, + HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS); + status->u.flush_queue.err_detected = + le32_get_bits(desc->info0, + HAL_REO_FLUSH_QUEUE_INFO0_ERR_DETECTED); +} + +void ath12k_hal_reo_flush_cache_status(struct ath12k_base *ab, struct hal_tlv_64_hdr *tlv, + struct hal_reo_status *status) +{ + struct ath12k_hal *hal = &ab->hal; + struct hal_reo_flush_cache_status *desc = + (struct hal_reo_flush_cache_status *)tlv->value; + + status->uniform_hdr.cmd_num = + le32_get_bits(desc->hdr.info0, + HAL_REO_STATUS_HDR_INFO0_STATUS_NUM); + status->uniform_hdr.cmd_status = + le32_get_bits(desc->hdr.info0, + HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS); + + status->u.flush_cache.err_detected = + le32_get_bits(desc->info0, + HAL_REO_FLUSH_CACHE_STATUS_INFO0_IS_ERR); + status->u.flush_cache.err_code = + le32_get_bits(desc->info0, + HAL_REO_FLUSH_CACHE_STATUS_INFO0_BLOCK_ERR_CODE); + if (!status->u.flush_cache.err_code) + hal->avail_blk_resource |= BIT(hal->current_blk_index); + + status->u.flush_cache.cache_controller_flush_status_hit = + le32_get_bits(desc->info0, + HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_STATUS_HIT); + + status->u.flush_cache.cache_controller_flush_status_desc_type = + le32_get_bits(desc->info0, + HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_DESC_TYPE); + status->u.flush_cache.cache_controller_flush_status_client_id = + le32_get_bits(desc->info0, + HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_CLIENT_ID); + status->u.flush_cache.cache_controller_flush_status_err = + le32_get_bits(desc->info0, + HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_ERR); + status->u.flush_cache.cache_controller_flush_status_cnt = + le32_get_bits(desc->info0, + HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_COUNT); +} + +void ath12k_hal_reo_unblk_cache_status(struct ath12k_base *ab, struct hal_tlv_64_hdr *tlv, + struct hal_reo_status *status) +{ + struct ath12k_hal *hal = &ab->hal; + struct hal_reo_unblock_cache_status *desc = + (struct hal_reo_unblock_cache_status *)tlv->value; + + status->uniform_hdr.cmd_num = + le32_get_bits(desc->hdr.info0, + HAL_REO_STATUS_HDR_INFO0_STATUS_NUM); + status->uniform_hdr.cmd_status = + le32_get_bits(desc->hdr.info0, + 
HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS);
+
+	status->u.unblock_cache.err_detected =
+		le32_get_bits(desc->info0,
+			      HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_IS_ERR);
+	status->u.unblock_cache.unblock_type =
+		le32_get_bits(desc->info0,
+			      HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_TYPE);
+
+	if (!status->u.unblock_cache.err_detected &&
+	    status->u.unblock_cache.unblock_type ==
+	    HAL_REO_STATUS_UNBLOCK_BLOCKING_RESOURCE)
+		hal->avail_blk_resource &= ~BIT(hal->current_blk_index);
+}
+
+void ath12k_hal_reo_flush_timeout_list_status(struct ath12k_base *ab,
+					      struct hal_tlv_64_hdr *tlv,
+					      struct hal_reo_status *status)
+{
+	struct hal_reo_flush_timeout_list_status *desc =
+		(struct hal_reo_flush_timeout_list_status *)tlv->value;
+
+	status->uniform_hdr.cmd_num =
+		le32_get_bits(desc->hdr.info0,
+			      HAL_REO_STATUS_HDR_INFO0_STATUS_NUM);
+	status->uniform_hdr.cmd_status =
+		le32_get_bits(desc->hdr.info0,
+			      HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS);
+
+	status->u.timeout_list.err_detected =
+		le32_get_bits(desc->info0,
+			      HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_IS_ERR);
+	status->u.timeout_list.list_empty =
+		le32_get_bits(desc->info0,
+			      HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_LIST_EMPTY);
+
+	status->u.timeout_list.release_desc_cnt =
+		le32_get_bits(desc->info1,
+			      HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_REL_DESC_COUNT);
+	status->u.timeout_list.fwd_buf_cnt =
+		le32_get_bits(desc->info1,
+			      HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_FWD_BUF_COUNT);
+}
+
+void ath12k_hal_reo_desc_thresh_reached_status(struct ath12k_base *ab,
+					       struct hal_tlv_64_hdr *tlv,
+					       struct hal_reo_status *status)
+{
+	struct hal_reo_desc_thresh_reached_status *desc =
+		(struct hal_reo_desc_thresh_reached_status *)tlv->value;
+
+	status->uniform_hdr.cmd_num =
+		le32_get_bits(desc->hdr.info0,
+			      HAL_REO_STATUS_HDR_INFO0_STATUS_NUM);
+	status->uniform_hdr.cmd_status =
+		le32_get_bits(desc->hdr.info0,
+			      HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS);
+
+	status->u.desc_thresh_reached.threshold_idx =
+		le32_get_bits(desc->info0,
+			      HAL_REO_DESC_THRESH_STATUS_INFO0_THRESH_INDEX);
+
+	status->u.desc_thresh_reached.link_desc_counter0 =
+		le32_get_bits(desc->info1,
+			      HAL_REO_DESC_THRESH_STATUS_INFO1_LINK_DESC_COUNTER0);
+
+	status->u.desc_thresh_reached.link_desc_counter1 =
+		le32_get_bits(desc->info2,
+			      HAL_REO_DESC_THRESH_STATUS_INFO2_LINK_DESC_COUNTER1);
+
+	status->u.desc_thresh_reached.link_desc_counter2 =
+		le32_get_bits(desc->info3,
+			      HAL_REO_DESC_THRESH_STATUS_INFO3_LINK_DESC_COUNTER2);
+
+	status->u.desc_thresh_reached.link_desc_counter_sum =
+		le32_get_bits(desc->info4,
+			      HAL_REO_DESC_THRESH_STATUS_INFO4_LINK_DESC_COUNTER_SUM);
+}
+
+void ath12k_hal_reo_update_rx_reo_queue_status(struct ath12k_base *ab,
+					       struct hal_tlv_64_hdr *tlv,
+					       struct hal_reo_status *status)
+{
+	struct hal_reo_status_hdr *desc =
+		(struct hal_reo_status_hdr *)tlv->value;
+
+	status->uniform_hdr.cmd_num =
+		le32_get_bits(desc->info0,
+			      HAL_REO_STATUS_HDR_INFO0_STATUS_NUM);
+	status->uniform_hdr.cmd_status =
+		le32_get_bits(desc->info0,
+			      HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS);
+}
+
+u32 ath12k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid)
+{
+	u32 num_ext_desc;
+
+	if (ba_window_size <= 1) {
+		if (tid != HAL_DESC_REO_NON_QOS_TID)
+			num_ext_desc = 1;
+		else
+			num_ext_desc = 0;
+	} else if (ba_window_size <= 105) {
+		num_ext_desc = 1;
+	} else if (ba_window_size <= 210) {
+		num_ext_desc = 2;
+	} else {
+		num_ext_desc = 3;
+	}
+
+	return sizeof(struct hal_rx_reo_queue) +
+	       (num_ext_desc * sizeof(struct hal_rx_reo_queue_ext));
+}
+
+void ath12k_hal_reo_qdesc_setup(struct hal_rx_reo_queue *qdesc,
+				int tid, u32 ba_window_size,
+				u32 start_seq, enum hal_pn_type type)
+{
+	struct hal_rx_reo_queue_ext *ext_desc;
+
+	memset(qdesc, 0, sizeof(*qdesc));
+
+	ath12k_hal_reo_set_desc_hdr(&qdesc->desc_hdr, HAL_DESC_REO_OWNED,
+				    HAL_DESC_REO_QUEUE_DESC,
+				    REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0);
+
+	qdesc->rx_queue_num = le32_encode_bits(tid, HAL_RX_REO_QUEUE_RX_QUEUE_NUMBER);
+
+	qdesc->info0 =
+		le32_encode_bits(1, HAL_RX_REO_QUEUE_INFO0_VLD) |
+		le32_encode_bits(1, HAL_RX_REO_QUEUE_INFO0_ASSOC_LNK_DESC_COUNTER) |
+		le32_encode_bits(ath12k_tid_to_ac(tid), HAL_RX_REO_QUEUE_INFO0_AC);
+
+	if (ba_window_size < 1)
+		ba_window_size = 1;
+
+	if (ba_window_size == 1 && tid != HAL_DESC_REO_NON_QOS_TID)
+		ba_window_size++;
+
+	if (ba_window_size == 1)
+		qdesc->info0 |= le32_encode_bits(1, HAL_RX_REO_QUEUE_INFO0_RETRY);
+
+	qdesc->info0 |= le32_encode_bits(ba_window_size - 1,
+					 HAL_RX_REO_QUEUE_INFO0_BA_WINDOW_SIZE);
+	switch (type) {
+	case HAL_PN_TYPE_NONE:
+	case HAL_PN_TYPE_WAPI_EVEN:
+	case HAL_PN_TYPE_WAPI_UNEVEN:
+		break;
+	case HAL_PN_TYPE_WPA:
+		qdesc->info0 |=
+			le32_encode_bits(1, HAL_RX_REO_QUEUE_INFO0_PN_CHECK) |
+			le32_encode_bits(HAL_RX_REO_QUEUE_PN_SIZE_48,
+					 HAL_RX_REO_QUEUE_INFO0_PN_SIZE);
+		break;
+	}
+
+	/* TODO: Set Ignore ampdu flags based on BA window size and/or
+	 * AMPDU capabilities
+	 */
+	qdesc->info0 |= le32_encode_bits(1, HAL_RX_REO_QUEUE_INFO0_IGNORE_AMPDU_FLG);
+
+	qdesc->info1 |= le32_encode_bits(0, HAL_RX_REO_QUEUE_INFO1_SVLD);
+
+	if (start_seq <= 0xfff)
+		qdesc->info1 = le32_encode_bits(start_seq,
+						HAL_RX_REO_QUEUE_INFO1_SSN);
+
+	if (tid == HAL_DESC_REO_NON_QOS_TID)
+		return;
+
+	ext_desc = qdesc->ext_desc;
+
+	/* TODO: HW queue descriptors are currently allocated for max BA
+	 * window size for all QoS TIDs so that the same descriptor can be
+	 * used later when an ADDBA request is received. This should be
+	 * changed to allocate HW queue descriptors based on the BA window
+	 * size being negotiated (0 for non-BA cases), and reallocate when
+	 * the BA window size changes, and also send a WMI message to the FW
+	 * to change the REO queue descriptor in the Rx peer entry as part
+	 * of dp_rx_tid_update.
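+	 * Until that is done, all three extension descriptors (enough for
+	 * the maximum 256-frame BA window, per ath12k_hal_reo_qdesc_size())
+	 * are initialized below for every QoS TID.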
+ */ + memset(ext_desc, 0, 3 * sizeof(*ext_desc)); + ath12k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED, + HAL_DESC_REO_QUEUE_EXT_DESC, + REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1); + ext_desc++; + ath12k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED, + HAL_DESC_REO_QUEUE_EXT_DESC, + REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2); + ext_desc++; + ath12k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED, + HAL_DESC_REO_QUEUE_EXT_DESC, + REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3); +} + +void ath12k_hal_reo_init_cmd_ring(struct ath12k_base *ab, + struct hal_srng *srng) +{ + struct hal_srng_params params; + struct hal_tlv_64_hdr *tlv; + struct hal_reo_get_queue_stats *desc; + int i, cmd_num = 1; + int entry_size; + u8 *entry; + + memset(¶ms, 0, sizeof(params)); + + entry_size = ath12k_hal_srng_get_entrysize(ab, HAL_REO_CMD); + ath12k_hal_srng_get_params(ab, srng, ¶ms); + entry = (u8 *)params.ring_base_vaddr; + + for (i = 0; i < params.num_entries; i++) { + tlv = (struct hal_tlv_64_hdr *)entry; + desc = (struct hal_reo_get_queue_stats *)tlv->value; + desc->cmd.info0 = le32_encode_bits(cmd_num++, + HAL_REO_CMD_HDR_INFO0_CMD_NUMBER); + entry += entry_size; + } +} + +void ath12k_hal_reo_hw_setup(struct ath12k_base *ab, u32 ring_hash_map) +{ + u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG; + u32 val; + + val = ath12k_hif_read32(ab, reo_base + HAL_REO1_GEN_ENABLE); + + val |= u32_encode_bits(1, HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE) | + u32_encode_bits(1, HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE); + ath12k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val); + + val = ath12k_hif_read32(ab, reo_base + HAL_REO1_MISC_CTRL_ADDR(ab)); + + val &= ~(HAL_REO1_MISC_CTL_FRAG_DST_RING | + HAL_REO1_MISC_CTL_BAR_DST_RING); + val |= u32_encode_bits(HAL_SRNG_RING_ID_REO2SW0, + HAL_REO1_MISC_CTL_FRAG_DST_RING); + val |= u32_encode_bits(HAL_SRNG_RING_ID_REO2SW0, + HAL_REO1_MISC_CTL_BAR_DST_RING); + ath12k_hif_write32(ab, reo_base + HAL_REO1_MISC_CTRL_ADDR(ab), val); + + ath12k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0(ab), + HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_USEC); + ath12k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1(ab), + HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_USEC); + ath12k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2(ab), + HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_USEC); + ath12k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3(ab), + HAL_DEFAULT_VO_REO_TIMEOUT_USEC); + + ath12k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2, + ring_hash_map); + ath12k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3, + ring_hash_map); +} diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.h b/drivers/net/wireless/ath/ath12k/hal_rx.h new file mode 100644 index 000000000000..fcfb6c819047 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/hal_rx.h @@ -0,0 +1,704 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef ATH12K_HAL_RX_H +#define ATH12K_HAL_RX_H + +struct hal_rx_wbm_rel_info { + u32 cookie; + enum hal_wbm_rel_src_module err_rel_src; + enum hal_reo_dest_ring_push_reason push_reason; + u32 err_code; + bool first_msdu; + bool last_msdu; + bool continuation; + void *rx_desc; + bool hw_cc_done; +}; + +#define HAL_INVALID_PEERID 0xffff +#define VHT_SIG_SU_NSS_MASK 0x7 + +#define HAL_RX_MAX_MCS 12 +#define HAL_RX_MAX_NSS 8 + +#define HAL_RX_MPDU_INFO_PN_GET_BYTE1(__val) \ + le32_get_bits((__val), GENMASK(7, 0)) + +#define HAL_RX_MPDU_INFO_PN_GET_BYTE2(__val) \ + le32_get_bits((__val), GENMASK(15, 8)) + +#define HAL_RX_MPDU_INFO_PN_GET_BYTE3(__val) \ + le32_get_bits((__val), GENMASK(23, 16)) + +#define HAL_RX_MPDU_INFO_PN_GET_BYTE4(__val) \ + le32_get_bits((__val), GENMASK(31, 24)) + +struct hal_rx_mon_status_tlv_hdr { + u32 hdr; + u8 value[]; +}; + +enum hal_rx_su_mu_coding { + HAL_RX_SU_MU_CODING_BCC, + HAL_RX_SU_MU_CODING_LDPC, + HAL_RX_SU_MU_CODING_MAX, +}; + +enum hal_rx_gi { + HAL_RX_GI_0_8_US, + HAL_RX_GI_0_4_US, + HAL_RX_GI_1_6_US, + HAL_RX_GI_3_2_US, + HAL_RX_GI_MAX, +}; + +enum hal_rx_bw { + HAL_RX_BW_20MHZ, + HAL_RX_BW_40MHZ, + HAL_RX_BW_80MHZ, + HAL_RX_BW_160MHZ, + HAL_RX_BW_MAX, +}; + +enum hal_rx_preamble { + HAL_RX_PREAMBLE_11A, + HAL_RX_PREAMBLE_11B, + HAL_RX_PREAMBLE_11N, + HAL_RX_PREAMBLE_11AC, + HAL_RX_PREAMBLE_11AX, + HAL_RX_PREAMBLE_MAX, +}; + +enum hal_rx_reception_type { + HAL_RX_RECEPTION_TYPE_SU, + HAL_RX_RECEPTION_TYPE_MU_MIMO, + HAL_RX_RECEPTION_TYPE_MU_OFDMA, + HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO, + HAL_RX_RECEPTION_TYPE_MAX, +}; + +enum hal_rx_legacy_rate { + HAL_RX_LEGACY_RATE_1_MBPS, + HAL_RX_LEGACY_RATE_2_MBPS, + HAL_RX_LEGACY_RATE_5_5_MBPS, + HAL_RX_LEGACY_RATE_6_MBPS, + HAL_RX_LEGACY_RATE_9_MBPS, + HAL_RX_LEGACY_RATE_11_MBPS, + HAL_RX_LEGACY_RATE_12_MBPS, + HAL_RX_LEGACY_RATE_18_MBPS, + HAL_RX_LEGACY_RATE_24_MBPS, + HAL_RX_LEGACY_RATE_36_MBPS, + HAL_RX_LEGACY_RATE_48_MBPS, + HAL_RX_LEGACY_RATE_54_MBPS, + HAL_RX_LEGACY_RATE_INVALID, +}; + +#define HAL_TLV_STATUS_PPDU_NOT_DONE 0 +#define HAL_TLV_STATUS_PPDU_DONE 1 +#define HAL_TLV_STATUS_BUF_DONE 2 +#define HAL_TLV_STATUS_PPDU_NON_STD_DONE 3 +#define HAL_RX_FCS_LEN 4 + +enum hal_rx_mon_status { + HAL_RX_MON_STATUS_PPDU_NOT_DONE, + HAL_RX_MON_STATUS_PPDU_DONE, + HAL_RX_MON_STATUS_BUF_DONE, +}; + +#define HAL_RX_MAX_MPDU 256 +#define HAL_RX_NUM_WORDS_PER_PPDU_BITMAP (HAL_RX_MAX_MPDU >> 5) + +struct hal_rx_user_status { + u32 mcs:4, + nss:3, + ofdma_info_valid:1, + ul_ofdma_ru_start_index:7, + ul_ofdma_ru_width:7, + ul_ofdma_ru_size:8; + u32 ul_ofdma_user_v0_word0; + u32 ul_ofdma_user_v0_word1; + u32 ast_index; + u32 tid; + u16 tcp_msdu_count; + u16 tcp_ack_msdu_count; + u16 udp_msdu_count; + u16 other_msdu_count; + u16 frame_control; + u8 frame_control_info_valid; + u8 data_sequence_control_info_valid; + u16 first_data_seq_ctrl; + u32 preamble_type; + u16 ht_flags; + u16 vht_flags; + u16 he_flags; + u8 rs_flags; + u8 ldpc; + u32 mpdu_cnt_fcs_ok; + u32 mpdu_cnt_fcs_err; + u32 mpdu_fcs_ok_bitmap[HAL_RX_NUM_WORDS_PER_PPDU_BITMAP]; + u32 mpdu_ok_byte_count; + u32 mpdu_err_byte_count; +}; + +#define HAL_MAX_UL_MU_USERS 37 + +struct hal_rx_mon_ppdu_info { + u32 ppdu_id; + u32 last_ppdu_id; + u64 ppdu_ts; + u32 num_mpdu_fcs_ok; + u32 num_mpdu_fcs_err; + u32 preamble_type; + u32 mpdu_len; + u16 chan_num; + u16 tcp_msdu_count; + u16 tcp_ack_msdu_count; + u16 udp_msdu_count; + u16 other_msdu_count; + u16 peer_id; + u8 rate; + u8 mcs; + u8 nss; + u8 bw; + u8 vht_flag_values1; + u8 vht_flag_values2; + u8 
vht_flag_values3[4]; + u8 vht_flag_values4; + u8 vht_flag_values5; + u16 vht_flag_values6; + u8 is_stbc; + u8 gi; + u8 sgi; + u8 ldpc; + u8 beamformed; + u8 rssi_comb; + u16 tid; + u8 fc_valid; + u16 ht_flags; + u16 vht_flags; + u16 he_flags; + u16 he_mu_flags; + u8 dcm; + u8 ru_alloc; + u8 reception_type; + u64 tsft; + u64 rx_duration; + u16 frame_control; + u32 ast_index; + u8 rs_fcs_err; + u8 rs_flags; + u8 cck_flag; + u8 ofdm_flag; + u8 ulofdma_flag; + u8 frame_control_info_valid; + u16 he_per_user_1; + u16 he_per_user_2; + u8 he_per_user_position; + u8 he_per_user_known; + u16 he_flags1; + u16 he_flags2; + u8 he_RU[4]; + u16 he_data1; + u16 he_data2; + u16 he_data3; + u16 he_data4; + u16 he_data5; + u16 he_data6; + u32 ppdu_len; + u32 prev_ppdu_id; + u32 device_id; + u16 first_data_seq_ctrl; + u8 monitor_direct_used; + u8 data_sequence_control_info_valid; + u8 ltf_size; + u8 rxpcu_filter_pass; + s8 rssi_chain[8][8]; + u32 num_users; + u32 mpdu_fcs_ok_bitmap[HAL_RX_NUM_WORDS_PER_PPDU_BITMAP]; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; + u8 addr3[ETH_ALEN]; + u8 addr4[ETH_ALEN]; + struct hal_rx_user_status userstats[HAL_MAX_UL_MU_USERS]; + u8 userid; + u16 ampdu_id[HAL_MAX_UL_MU_USERS]; + bool first_msdu_in_mpdu; + bool is_ampdu; + u8 medium_prot_type; +}; + +#define HAL_RX_PPDU_START_INFO0_PPDU_ID GENMASK(15, 0) + +struct hal_rx_ppdu_start { + __le32 info0; + __le32 chan_num; + __le32 ppdu_start_ts; +} __packed; + +#define HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR GENMASK(25, 16) + +#define HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK GENMASK(8, 0) +#define HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID BIT(9) +#define HAL_RX_PPDU_END_USER_STATS_INFO1_QOS_CTRL_VALID BIT(10) +#define HAL_RX_PPDU_END_USER_STATS_INFO1_HT_CTRL_VALID BIT(11) +#define HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE GENMASK(23, 20) + +#define HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX GENMASK(15, 0) +#define HAL_RX_PPDU_END_USER_STATS_INFO2_FRAME_CTRL GENMASK(31, 16) + +#define HAL_RX_PPDU_END_USER_STATS_INFO3_QOS_CTRL GENMASK(31, 16) + +#define HAL_RX_PPDU_END_USER_STATS_INFO4_UDP_MSDU_CNT GENMASK(15, 0) +#define HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT GENMASK(31, 16) + +#define HAL_RX_PPDU_END_USER_STATS_INFO5_OTHER_MSDU_CNT GENMASK(15, 0) +#define HAL_RX_PPDU_END_USER_STATS_INFO5_TCP_ACK_MSDU_CNT GENMASK(31, 16) + +#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP GENMASK(15, 0) +#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_EOSP_BITMAP GENMASK(31, 16) + +#define HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_OK_BYTE_COUNT GENMASK(24, 0) +#define HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_ERR_BYTE_COUNT GENMASK(24, 0) + +struct hal_rx_ppdu_end_user_stats { + __le32 rsvd0[2]; + __le32 info0; + __le32 info1; + __le32 info2; + __le32 info3; + __le32 ht_ctrl; + __le32 rsvd1[2]; + __le32 info4; + __le32 info5; + __le32 usr_resp_ref; + __le32 info6; + __le32 rsvd3[4]; + __le32 mpdu_ok_cnt; + __le32 rsvd4; + __le32 mpdu_err_cnt; + __le32 rsvd5[2]; + __le32 usr_resp_ref_ext; + __le32 rsvd6; +} __packed; + +struct hal_rx_ppdu_end_user_stats_ext { + __le32 info0; + __le32 info1; + __le32 info2; + __le32 info3; + __le32 info4; + __le32 info5; + __le32 info6; +} __packed; + +#define HAL_RX_HT_SIG_INFO_INFO0_MCS GENMASK(6, 0) +#define HAL_RX_HT_SIG_INFO_INFO0_BW BIT(7) + +#define HAL_RX_HT_SIG_INFO_INFO1_STBC GENMASK(5, 4) +#define HAL_RX_HT_SIG_INFO_INFO1_FEC_CODING BIT(6) +#define HAL_RX_HT_SIG_INFO_INFO1_GI BIT(7) + +struct hal_rx_ht_sig_info { + __le32 info0; + __le32 info1; +} __packed; + +#define 
HAL_RX_LSIG_B_INFO_INFO0_RATE GENMASK(3, 0) +#define HAL_RX_LSIG_B_INFO_INFO0_LEN GENMASK(15, 4) + +struct hal_rx_lsig_b_info { + __le32 info0; +} __packed; + +#define HAL_RX_LSIG_A_INFO_INFO0_RATE GENMASK(3, 0) +#define HAL_RX_LSIG_A_INFO_INFO0_LEN GENMASK(16, 5) +#define HAL_RX_LSIG_A_INFO_INFO0_PKT_TYPE GENMASK(27, 24) + +struct hal_rx_lsig_a_info { + __le32 info0; +} __packed; + +#define HAL_RX_VHT_SIG_A_INFO_INFO0_BW GENMASK(1, 0) +#define HAL_RX_VHT_SIG_A_INFO_INFO0_STBC BIT(3) +#define HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID GENMASK(9, 4) +#define HAL_RX_VHT_SIG_A_INFO_INFO0_NSTS GENMASK(21, 10) + +#define HAL_RX_VHT_SIG_A_INFO_INFO1_GI_SETTING GENMASK(1, 0) +#define HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING BIT(2) +#define HAL_RX_VHT_SIG_A_INFO_INFO1_MCS GENMASK(7, 4) +#define HAL_RX_VHT_SIG_A_INFO_INFO1_BEAMFORMED BIT(8) + +struct hal_rx_vht_sig_a_info { + __le32 info0; + __le32 info1; +} __packed; + +enum hal_rx_vht_sig_a_gi_setting { + HAL_RX_VHT_SIG_A_NORMAL_GI = 0, + HAL_RX_VHT_SIG_A_SHORT_GI = 1, + HAL_RX_VHT_SIG_A_SHORT_GI_AMBIGUITY = 3, +}; + +#define HE_GI_0_8 0 +#define HE_GI_0_4 1 +#define HE_GI_1_6 2 +#define HE_GI_3_2 3 + +#define HE_LTF_1_X 0 +#define HE_LTF_2_X 1 +#define HE_LTF_4_X 2 + +#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS GENMASK(6, 3) +#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM BIT(7) +#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW GENMASK(20, 19) +#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE GENMASK(22, 21) +#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS GENMASK(25, 23) +#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_BSS_COLOR GENMASK(13, 8) +#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_SPATIAL_REUSE GENMASK(18, 15) +#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_FORMAT_IND BIT(0) +#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_BEAM_CHANGE BIT(1) +#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_DL_UL_FLAG BIT(2) + +#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXOP_DURATION GENMASK(6, 0) +#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING BIT(7) +#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_LDPC_EXTRA BIT(8) +#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC BIT(9) +#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF BIT(10) +#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_FACTOR GENMASK(12, 11) +#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_PE_DISAM BIT(13) +#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_DOPPLER_IND BIT(15) + +struct hal_rx_he_sig_a_su_info { + __le32 info0; + __le32 info1; +} __packed; + +#define HAL_RX_HE_SIG_A_MU_DL_INFO0_UL_FLAG BIT(1) +#define HAL_RX_HE_SIG_A_MU_DL_INFO0_MCS_OF_SIGB GENMASK(3, 1) +#define HAL_RX_HE_SIG_A_MU_DL_INFO0_DCM_OF_SIGB BIT(4) +#define HAL_RX_HE_SIG_A_MU_DL_INFO0_BSS_COLOR GENMASK(10, 5) +#define HAL_RX_HE_SIG_A_MU_DL_INFO0_SPATIAL_REUSE GENMASK(14, 11) +#define HAL_RX_HE_SIG_A_MU_DL_INFO0_TRANSMIT_BW GENMASK(17, 15) +#define HAL_RX_HE_SIG_A_MU_DL_INFO0_NUM_SIGB_SYMB GENMASK(21, 18) +#define HAL_RX_HE_SIG_A_MU_DL_INFO0_COMP_MODE_SIGB BIT(22) +#define HAL_RX_HE_SIG_A_MU_DL_INFO0_CP_LTF_SIZE GENMASK(24, 23) +#define HAL_RX_HE_SIG_A_MU_DL_INFO0_DOPPLER_INDICATION BIT(25) + +#define HAL_RX_HE_SIG_A_MU_DL_INFO1_TXOP_DURATION GENMASK(6, 0) +#define HAL_RX_HE_SIG_A_MU_DL_INFO1_CODING BIT(7) +#define HAL_RX_HE_SIG_A_MU_DL_INFO1_NUM_LTF_SYMB GENMASK(10, 8) +#define HAL_RX_HE_SIG_A_MU_DL_INFO1_LDPC_EXTRA BIT(11) +#define HAL_RX_HE_SIG_A_MU_DL_INFO1_STBC BIT(12) +#define HAL_RX_HE_SIG_A_MU_DL_INFO1_TXBF BIT(10) +#define HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_FACTOR GENMASK(14, 13) +#define HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_PE_DISAM BIT(15) + +struct hal_rx_he_sig_a_mu_dl_info { + __le32 info0; + __le32 
info1; +} __packed; + +#define HAL_RX_HE_SIG_B1_MU_INFO_INFO0_RU_ALLOCATION GENMASK(7, 0) + +struct hal_rx_he_sig_b1_mu_info { + __le32 info0; +} __packed; + +#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_ID GENMASK(10, 0) +#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS GENMASK(18, 15) +#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING BIT(20) +#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS GENMASK(31, 29) + +struct hal_rx_he_sig_b2_mu_info { + __le32 info0; +} __packed; + +#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID GENMASK(10, 0) +#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS GENMASK(13, 11) +#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF BIT(19) +#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS GENMASK(18, 15) +#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM BIT(19) +#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING BIT(20) + +struct hal_rx_he_sig_b2_ofdma_info { + __le32 info0; +} __packed; + +enum hal_rx_ul_reception_type { + HAL_RECEPTION_TYPE_ULOFMDA, + HAL_RECEPTION_TYPE_ULMIMO, + HAL_RECEPTION_TYPE_OTHER, + HAL_RECEPTION_TYPE_FRAMELESS +}; + +#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RSSI_COMB GENMASK(15, 8) +#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_RSVD1_RECEPTION GENMASK(3, 0) + +struct hal_rx_phyrx_rssi_legacy_info { + __le32 rsvd[35]; + __le32 info0; +} __packed; + +#define HAL_RX_MPDU_START_INFO0_PPDU_ID GENMASK(31, 16) +#define HAL_RX_MPDU_START_INFO1_PEERID GENMASK(31, 16) +#define HAL_RX_MPDU_START_INFO2_MPDU_LEN GENMASK(13, 0) +struct hal_rx_mpdu_start { + __le32 info0; + __le32 info1; + __le32 rsvd1[11]; + __le32 info2; + __le32 rsvd2[9]; +} __packed; + +#define HAL_RX_PPDU_END_DURATION GENMASK(23, 0) +struct hal_rx_ppdu_end_duration { + __le32 rsvd0[9]; + __le32 info0; + __le32 rsvd1[4]; +} __packed; + +struct hal_rx_rxpcu_classification_overview { + u32 rsvd0; +} __packed; + +struct hal_rx_msdu_desc_info { + u32 msdu_flags; + u16 msdu_len; /* 14 bits for length */ +}; + +#define HAL_RX_NUM_MSDU_DESC 6 +struct hal_rx_msdu_list { + struct hal_rx_msdu_desc_info msdu_info[HAL_RX_NUM_MSDU_DESC]; + u32 sw_cookie[HAL_RX_NUM_MSDU_DESC]; + u8 rbm[HAL_RX_NUM_MSDU_DESC]; +}; + +#define HAL_RX_FBM_ACK_INFO0_ADDR1_31_0 GENMASK(31, 0) +#define HAL_RX_FBM_ACK_INFO1_ADDR1_47_32 GENMASK(15, 0) +#define HAL_RX_FBM_ACK_INFO1_ADDR2_15_0 GENMASK(31, 16) +#define HAL_RX_FBM_ACK_INFO2_ADDR2_47_16 GENMASK(31, 0) + +struct hal_rx_frame_bitmap_ack { + __le32 reserved; + __le32 info0; + __le32 info1; + __le32 info2; + __le32 reserved1[10]; +} __packed; + +#define HAL_RX_RESP_REQ_INFO0_PPDU_ID GENMASK(15, 0) +#define HAL_RX_RESP_REQ_INFO0_RECEPTION_TYPE BIT(16) +#define HAL_RX_RESP_REQ_INFO1_DURATION GENMASK(15, 0) +#define HAL_RX_RESP_REQ_INFO1_RATE_MCS GENMASK(24, 21) +#define HAL_RX_RESP_REQ_INFO1_SGI GENMASK(26, 25) +#define HAL_RX_RESP_REQ_INFO1_STBC BIT(27) +#define HAL_RX_RESP_REQ_INFO1_LDPC BIT(28) +#define HAL_RX_RESP_REQ_INFO1_IS_AMPDU BIT(29) +#define HAL_RX_RESP_REQ_INFO2_NUM_USER GENMASK(6, 0) +#define HAL_RX_RESP_REQ_INFO3_ADDR1_31_0 GENMASK(31, 0) +#define HAL_RX_RESP_REQ_INFO4_ADDR1_47_32 GENMASK(15, 0) +#define HAL_RX_RESP_REQ_INFO4_ADDR1_15_0 GENMASK(31, 16) +#define HAL_RX_RESP_REQ_INFO5_ADDR1_47_16 GENMASK(31, 0) + +struct hal_rx_resp_req_info { + __le32 info0; + __le32 reserved[1]; + __le32 info1; + __le32 info2; + __le32 reserved1[2]; + __le32 info3; + __le32 info4; + __le32 info5; + __le32 reserved2[5]; +} __packed; + +#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0 0xDDBEEF +#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1 0xADBEEF +#define 
REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2 0xBDBEEF
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3 0xCDBEEF
+
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W0_VALID BIT(30)
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W0_VER BIT(31)
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W1_NSS GENMASK(2, 0)
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W1_MCS GENMASK(6, 3)
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W1_LDPC BIT(7)
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W1_DCM BIT(8)
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W1_RU_START GENMASK(15, 9)
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W1_RU_SIZE GENMASK(18, 16)
+
+/* HE Radiotap data1 Mask */
+#define HE_SU_FORMAT_TYPE 0x0000
+#define HE_EXT_SU_FORMAT_TYPE 0x0001
+#define HE_MU_FORMAT_TYPE 0x0002
+#define HE_TRIG_FORMAT_TYPE 0x0003
+#define HE_BEAM_CHANGE_KNOWN 0x0008
+#define HE_DL_UL_KNOWN 0x0010
+#define HE_MCS_KNOWN 0x0020
+#define HE_DCM_KNOWN 0x0040
+#define HE_CODING_KNOWN 0x0080
+#define HE_LDPC_EXTRA_SYMBOL_KNOWN 0x0100
+#define HE_STBC_KNOWN 0x0200
+#define HE_DATA_BW_RU_KNOWN 0x4000
+#define HE_DOPPLER_KNOWN 0x8000
+#define HE_BSS_COLOR_KNOWN 0x0004
+
+/* HE Radiotap data2 Mask */
+#define HE_GI_KNOWN 0x0002
+#define HE_TXBF_KNOWN 0x0010
+#define HE_PE_DISAMBIGUITY_KNOWN 0x0020
+#define HE_TXOP_KNOWN 0x0040
+#define HE_LTF_SYMBOLS_KNOWN 0x0004
+#define HE_PRE_FEC_PADDING_KNOWN 0x0008
+#define HE_MIDAMBLE_PERIODICITY_KNOWN 0x0080
+
+/* HE radiotap data3 shift values */
+#define HE_BEAM_CHANGE_SHIFT 6
+#define HE_DL_UL_SHIFT 7
+#define HE_TRANSMIT_MCS_SHIFT 8
+#define HE_DCM_SHIFT 12
+#define HE_CODING_SHIFT 13
+#define HE_LDPC_EXTRA_SYMBOL_SHIFT 14
+#define HE_STBC_SHIFT 15
+
+/* HE radiotap data4 shift values */
+#define HE_STA_ID_SHIFT 4
+
+/* HE radiotap data5 */
+#define HE_GI_SHIFT 4
+#define HE_LTF_SIZE_SHIFT 6
+#define HE_LTF_SYM_SHIFT 8
+#define HE_TXBF_SHIFT 14
+#define HE_PE_DISAMBIGUITY_SHIFT 15
+#define HE_PRE_FEC_PAD_SHIFT 12
+
+/* HE radiotap data6 */
+#define HE_DOPPLER_SHIFT 4
+#define HE_TXOP_SHIFT 8
+
+/* HE radiotap HE-MU flags1 */
+#define HE_SIG_B_MCS_KNOWN 0x0010
+#define HE_SIG_B_DCM_KNOWN 0x0040
+#define HE_SIG_B_SYM_NUM_KNOWN 0x8000
+#define HE_RU_0_KNOWN 0x0100
+#define HE_RU_1_KNOWN 0x0200
+#define HE_RU_2_KNOWN 0x0400
+#define HE_RU_3_KNOWN 0x0800
+#define HE_DCM_FLAG_1_SHIFT 5
+#define HE_SPATIAL_REUSE_MU_KNOWN 0x0100
+#define HE_SIG_B_COMPRESSION_FLAG_1_KNOWN 0x4000
+
+/* HE radiotap HE-MU flags2 */
+#define HE_SIG_B_COMPRESSION_FLAG_2_SHIFT 3
+#define HE_BW_KNOWN 0x0004
+#define HE_NUM_SIG_B_SYMBOLS_SHIFT 4
+#define HE_SIG_B_COMPRESSION_FLAG_2_KNOWN 0x0100
+#define HE_NUM_SIG_B_FLAG_2_SHIFT 9
+#define HE_LTF_FLAG_2_SYMBOLS_SHIFT 12
+#define HE_LTF_KNOWN 0x8000
+
+/* HE radiotap per_user_1 */
+#define HE_STA_SPATIAL_SHIFT 11
+#define HE_TXBF_SHIFT 14
+#define HE_RESERVED_SET_TO_1_SHIFT 19
+#define HE_STA_CODING_SHIFT 20
+
+/* HE radiotap per_user_2 */
+#define HE_STA_MCS_SHIFT 4
+#define HE_STA_DCM_SHIFT 5
+
+/* HE radiotap per user known */
+#define HE_USER_FIELD_POSITION_KNOWN 0x01
+#define HE_STA_ID_PER_USER_KNOWN 0x02
+#define HE_STA_NSTS_KNOWN 0x04
+#define HE_STA_TX_BF_KNOWN 0x08
+#define HE_STA_SPATIAL_CONFIG_KNOWN 0x10
+#define HE_STA_MCS_KNOWN 0x20
+#define HE_STA_DCM_KNOWN 0x40
+#define HE_STA_CODING_KNOWN 0x80
+
+#define HAL_RX_MPDU_ERR_FCS BIT(0)
+#define HAL_RX_MPDU_ERR_DECRYPT BIT(1)
+#define HAL_RX_MPDU_ERR_TKIP_MIC BIT(2)
+#define HAL_RX_MPDU_ERR_AMSDU_ERR BIT(3)
+#define HAL_RX_MPDU_ERR_OVERFLOW BIT(4)
+#define HAL_RX_MPDU_ERR_MSDU_LEN BIT(5)
+#define HAL_RX_MPDU_ERR_MPDU_LEN BIT(6)
+#define HAL_RX_MPDU_ERR_UNENCRYPTED_FRAME BIT(7)
+
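+/* Map a HE RU tone count (RU_26, RU_52, ...) to the matching nl80211
+ * RU allocation for rate reporting; unrecognized tone counts
+ * conservatively fall back to the 26-tone allocation.
+ */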
+static inline +enum nl80211_he_ru_alloc ath12k_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones) +{ + enum nl80211_he_ru_alloc ret; + + switch (ru_tones) { + case RU_52: + ret = NL80211_RATE_INFO_HE_RU_ALLOC_52; + break; + case RU_106: + ret = NL80211_RATE_INFO_HE_RU_ALLOC_106; + break; + case RU_242: + ret = NL80211_RATE_INFO_HE_RU_ALLOC_242; + break; + case RU_484: + ret = NL80211_RATE_INFO_HE_RU_ALLOC_484; + break; + case RU_996: + ret = NL80211_RATE_INFO_HE_RU_ALLOC_996; + break; + case RU_26: + fallthrough; + default: + ret = NL80211_RATE_INFO_HE_RU_ALLOC_26; + break; + } + return ret; +} + +void ath12k_hal_reo_status_queue_stats(struct ath12k_base *ab, + struct hal_tlv_64_hdr *tlv, + struct hal_reo_status *status); +void ath12k_hal_reo_flush_queue_status(struct ath12k_base *ab, + struct hal_tlv_64_hdr *tlv, + struct hal_reo_status *status); +void ath12k_hal_reo_flush_cache_status(struct ath12k_base *ab, + struct hal_tlv_64_hdr *tlv, + struct hal_reo_status *status); +void ath12k_hal_reo_unblk_cache_status(struct ath12k_base *ab, + struct hal_tlv_64_hdr *tlv, + struct hal_reo_status *status); +void ath12k_hal_reo_flush_timeout_list_status(struct ath12k_base *ab, + struct hal_tlv_64_hdr *tlv, + struct hal_reo_status *status); +void ath12k_hal_reo_desc_thresh_reached_status(struct ath12k_base *ab, + struct hal_tlv_64_hdr *tlv, + struct hal_reo_status *status); +void ath12k_hal_reo_update_rx_reo_queue_status(struct ath12k_base *ab, + struct hal_tlv_64_hdr *tlv, + struct hal_reo_status *status); +void ath12k_hal_rx_msdu_link_info_get(struct hal_rx_msdu_link *link, u32 *num_msdus, + u32 *msdu_cookies, + enum hal_rx_buf_return_buf_manager *rbm); +void ath12k_hal_rx_msdu_link_desc_set(struct ath12k_base *ab, + struct hal_wbm_release_ring *dst_desc, + struct hal_wbm_release_ring *src_desc, + enum hal_wbm_rel_bm_act action); +void ath12k_hal_rx_buf_addr_info_set(struct ath12k_buffer_addr *binfo, + dma_addr_t paddr, u32 cookie, u8 manager); +void ath12k_hal_rx_buf_addr_info_get(struct ath12k_buffer_addr *binfo, + dma_addr_t *paddr, + u32 *cookie, u8 *rbm); +int ath12k_hal_desc_reo_parse_err(struct ath12k_base *ab, + struct hal_reo_dest_ring *desc, + dma_addr_t *paddr, u32 *desc_bank); +int ath12k_hal_wbm_desc_parse_err(struct ath12k_base *ab, void *desc, + struct hal_rx_wbm_rel_info *rel_info); +void ath12k_hal_rx_reo_ent_paddr_get(struct ath12k_base *ab, + struct ath12k_buffer_addr *buff_addr, + dma_addr_t *paddr, u32 *cookie); + +#endif diff --git a/drivers/net/wireless/ath/ath12k/hal_tx.c b/drivers/net/wireless/ath/ath12k/hal_tx.c new file mode 100644 index 000000000000..869e07e406fe --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/hal_tx.c @@ -0,0 +1,145 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */
+
+#include "hal_desc.h"
+#include "hal.h"
+#include "hal_tx.h"
+#include "hif.h"
+
+#define DSCP_TID_MAP_TBL_ENTRY_SIZE 64
+
+/* dscp_tid_map - Default DSCP-TID mapping
+ *=================
+ * DSCP      TID
+ *=================
+ * 000xxx    0
+ * 001xxx    1
+ * 010xxx    2
+ * 011xxx    3
+ * 100xxx    4
+ * 101xxx    5
+ * 110xxx    6
+ * 111xxx    7
+ */
+static inline u8 dscp2tid(u8 dscp)
+{
+	return dscp >> 3;
+}
+
+void ath12k_hal_tx_cmd_desc_setup(struct ath12k_base *ab,
+				  struct hal_tcl_data_cmd *tcl_cmd,
+				  struct hal_tx_info *ti)
+{
+	tcl_cmd->buf_addr_info.info0 =
+		le32_encode_bits(ti->paddr, BUFFER_ADDR_INFO0_ADDR);
+	tcl_cmd->buf_addr_info.info1 =
+		le32_encode_bits(((u64)ti->paddr >> HAL_ADDR_MSB_REG_SHIFT),
+				 BUFFER_ADDR_INFO1_ADDR);
+	tcl_cmd->buf_addr_info.info1 |=
+		le32_encode_bits(ti->rbm_id, BUFFER_ADDR_INFO1_RET_BUF_MGR) |
+		le32_encode_bits(ti->desc_id, BUFFER_ADDR_INFO1_SW_COOKIE);
+
+	tcl_cmd->info0 =
+		le32_encode_bits(ti->type, HAL_TCL_DATA_CMD_INFO0_DESC_TYPE) |
+		le32_encode_bits(ti->bank_id, HAL_TCL_DATA_CMD_INFO0_BANK_ID);
+
+	tcl_cmd->info1 =
+		le32_encode_bits(ti->meta_data_flags,
+				 HAL_TCL_DATA_CMD_INFO1_CMD_NUM);
+
+	tcl_cmd->info2 = cpu_to_le32(ti->flags0) |
+		le32_encode_bits(ti->data_len, HAL_TCL_DATA_CMD_INFO2_DATA_LEN) |
+		le32_encode_bits(ti->pkt_offset, HAL_TCL_DATA_CMD_INFO2_PKT_OFFSET);
+
+	tcl_cmd->info3 = cpu_to_le32(ti->flags1) |
+		le32_encode_bits(ti->tid, HAL_TCL_DATA_CMD_INFO3_TID) |
+		le32_encode_bits(ti->lmac_id, HAL_TCL_DATA_CMD_INFO3_PMAC_ID) |
+		le32_encode_bits(ti->vdev_id, HAL_TCL_DATA_CMD_INFO3_VDEV_ID);
+
+	tcl_cmd->info4 = le32_encode_bits(ti->bss_ast_idx,
+					  HAL_TCL_DATA_CMD_INFO4_SEARCH_INDEX) |
+			 le32_encode_bits(ti->bss_ast_hash,
+					  HAL_TCL_DATA_CMD_INFO4_CACHE_SET_NUM);
+	tcl_cmd->info5 = 0;
+}
+
+void ath12k_hal_tx_set_dscp_tid_map(struct ath12k_base *ab, int id)
+{
+	u32 ctrl_reg_val;
+	u32 addr;
+	u8 hw_map_val[HAL_DSCP_TID_TBL_SIZE], dscp, tid;
+	int i;
+	u32 value;
+
+	ctrl_reg_val = ath12k_hif_read32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+					 HAL_TCL1_RING_CMN_CTRL_REG);
+	/* Enable read/write access */
+	ctrl_reg_val |= HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
+	ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+			   HAL_TCL1_RING_CMN_CTRL_REG, ctrl_reg_val);
+
+	addr = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_DSCP_TID_MAP +
+	       (4 * id * (HAL_DSCP_TID_TBL_SIZE / 4));
+
+	/* Configure each DSCP-TID mapping in three bits, thereby
+	 * configuring three bytes per iteration.
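+	 * Eight 3-bit TID values are packed into the low 24 bits of
+	 * 'value' and copied into hw_map_val[] three bytes at a time;
+	 * e.g. with the default map above, DSCP 46 (0b101110) yields
+	 * TID 5.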
+ */ + for (i = 0, dscp = 0; i < HAL_DSCP_TID_TBL_SIZE; i += 3) { + tid = dscp2tid(dscp); + value = u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP0); + dscp++; + + tid = dscp2tid(dscp); + value |= u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP1); + dscp++; + + tid = dscp2tid(dscp); + value |= u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP2); + dscp++; + + tid = dscp2tid(dscp); + value |= u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP3); + dscp++; + + tid = dscp2tid(dscp); + value |= u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP4); + dscp++; + + tid = dscp2tid(dscp); + value |= u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP5); + dscp++; + + tid = dscp2tid(dscp); + value |= u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP6); + dscp++; + + tid = dscp2tid(dscp); + value |= u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP7); + dscp++; + + memcpy(&hw_map_val[i], &value, 3); + } + + for (i = 0; i < HAL_DSCP_TID_TBL_SIZE; i += 4) { + ath12k_hif_write32(ab, addr, *(u32 *)&hw_map_val[i]); + addr += 4; + } + + /* Disable read/write access */ + ctrl_reg_val = ath12k_hif_read32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG + + HAL_TCL1_RING_CMN_CTRL_REG); + ctrl_reg_val &= ~HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN; + ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG + + HAL_TCL1_RING_CMN_CTRL_REG, + ctrl_reg_val); +} + +void ath12k_hal_tx_configure_bank_register(struct ath12k_base *ab, u32 bank_config, + u8 bank_id) +{ + ath12k_hif_write32(ab, HAL_TCL_SW_CONFIG_BANK_ADDR + 4 * bank_id, + bank_config); +} diff --git a/drivers/net/wireless/ath/ath12k/hal_tx.h b/drivers/net/wireless/ath/ath12k/hal_tx.h new file mode 100644 index 000000000000..7c837094a6f7 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/hal_tx.h @@ -0,0 +1,194 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef ATH12K_HAL_TX_H +#define ATH12K_HAL_TX_H + +#include "hal_desc.h" +#include "core.h" + +#define HAL_TX_ADDRX_EN 1 +#define HAL_TX_ADDRY_EN 2 + +#define HAL_TX_ADDR_SEARCH_DEFAULT 0 +#define HAL_TX_ADDR_SEARCH_INDEX 1 + +/* TODO: check all these data can be managed with struct ath12k_tx_desc_info for perf */ +struct hal_tx_info { + u16 meta_data_flags; /* %HAL_TCL_DATA_CMD_INFO0_META_ */ + u8 ring_id; + u8 rbm_id; + u32 desc_id; + enum hal_tcl_desc_type type; + enum hal_tcl_encap_type encap_type; + dma_addr_t paddr; + u32 data_len; + u32 pkt_offset; + enum hal_encrypt_type encrypt_type; + u32 flags0; /* %HAL_TCL_DATA_CMD_INFO1_ */ + u32 flags1; /* %HAL_TCL_DATA_CMD_INFO2_ */ + u16 addr_search_flags; /* %HAL_TCL_DATA_CMD_INFO0_ADDR(X/Y)_ */ + u16 bss_ast_hash; + u16 bss_ast_idx; + u8 tid; + u8 search_type; /* %HAL_TX_ADDR_SEARCH_ */ + u8 lmac_id; + u8 vdev_id; + u8 dscp_tid_tbl_idx; + bool enable_mesh; + int bank_id; +}; + +/* TODO: Check if the actual desc macros can be used instead */ +#define HAL_TX_STATUS_FLAGS_FIRST_MSDU BIT(0) +#define HAL_TX_STATUS_FLAGS_LAST_MSDU BIT(1) +#define HAL_TX_STATUS_FLAGS_MSDU_IN_AMSDU BIT(2) +#define HAL_TX_STATUS_FLAGS_RATE_STATS_VALID BIT(3) +#define HAL_TX_STATUS_FLAGS_RATE_LDPC BIT(4) +#define HAL_TX_STATUS_FLAGS_RATE_STBC BIT(5) +#define HAL_TX_STATUS_FLAGS_OFDMA BIT(6) + +#define HAL_TX_STATUS_DESC_LEN sizeof(struct hal_wbm_release_ring) + +/* Tx status parsed from srng desc */ +struct hal_tx_status { + enum hal_wbm_rel_src_module buf_rel_source; + enum hal_wbm_tqm_rel_reason status; + u8 ack_rssi; + u32 flags; /* %HAL_TX_STATUS_FLAGS_ */ + u32 ppdu_id; + u8 try_cnt; + u8 tid; + u16 peer_id; + u32 rate_stats; +}; + +#define HAL_TX_PHY_DESC_INFO0_BF_TYPE GENMASK(17, 16) +#define HAL_TX_PHY_DESC_INFO0_PREAMBLE_11B BIT(20) +#define HAL_TX_PHY_DESC_INFO0_PKT_TYPE GENMASK(24, 21) +#define HAL_TX_PHY_DESC_INFO0_BANDWIDTH GENMASK(30, 28) +#define HAL_TX_PHY_DESC_INFO1_MCS GENMASK(3, 0) +#define HAL_TX_PHY_DESC_INFO1_STBC BIT(6) +#define HAL_TX_PHY_DESC_INFO2_NSS GENMASK(23, 21) +#define HAL_TX_PHY_DESC_INFO3_AP_PKT_BW GENMASK(6, 4) +#define HAL_TX_PHY_DESC_INFO3_LTF_SIZE GENMASK(20, 19) +#define HAL_TX_PHY_DESC_INFO3_ACTIVE_CHANNEL GENMASK(17, 15) + +struct hal_tx_phy_desc { + __le32 info0; + __le32 info1; + __le32 info2; + __le32 info3; +} __packed; + +#define HAL_TX_FES_STAT_PROT_INFO0_STRT_FRM_TS_15_0 GENMASK(15, 0) +#define HAL_TX_FES_STAT_PROT_INFO0_STRT_FRM_TS_31_16 GENMASK(31, 16) +#define HAL_TX_FES_STAT_PROT_INFO1_END_FRM_TS_15_0 GENMASK(15, 0) +#define HAL_TX_FES_STAT_PROT_INFO1_END_FRM_TS_31_16 GENMASK(31, 16) + +struct hal_tx_fes_status_prot { + __le64 reserved; + __le32 info0; + __le32 info1; + __le32 reserved1[11]; +} __packed; + +#define HAL_TX_FES_STAT_USR_PPDU_INFO0_DURATION GENMASK(15, 0) + +struct hal_tx_fes_status_user_ppdu { + __le64 reserved; + __le32 info0; + __le32 reserved1[3]; +} __packed; + +#define HAL_TX_FES_STAT_STRT_INFO0_PROT_TS_LOWER_32 GENMASK(31, 0) +#define HAL_TX_FES_STAT_STRT_INFO1_PROT_TS_UPPER_32 GENMASK(31, 0) + +struct hal_tx_fes_status_start_prot { + __le32 info0; + __le32 info1; + __le64 reserved; +} __packed; + +#define HAL_TX_FES_STATUS_START_INFO0_MEDIUM_PROT_TYPE GENMASK(29, 27) + +struct hal_tx_fes_status_start { + __le32 reserved; + __le32 info0; + __le64 reserved1; +} __packed; + +#define HAL_TX_Q_EXT_INFO0_FRAME_CTRL GENMASK(15, 0) +#define HAL_TX_Q_EXT_INFO0_QOS_CTRL GENMASK(31, 16) +#define HAL_TX_Q_EXT_INFO1_AMPDU_FLAG BIT(0) + +struct hal_tx_queue_exten { + __le32 info0; + __le32 info1; 
+} __packed; + +#define HAL_TX_FES_SETUP_INFO0_NUM_OF_USERS GENMASK(28, 23) + +struct hal_tx_fes_setup { + __le32 schedule_id; + __le32 info0; + __le64 reserved; +} __packed; + +#define HAL_TX_PPDU_SETUP_INFO0_MEDIUM_PROT_TYPE GENMASK(2, 0) +#define HAL_TX_PPDU_SETUP_INFO1_PROT_FRAME_ADDR1_31_0 GENMASK(31, 0) +#define HAL_TX_PPDU_SETUP_INFO2_PROT_FRAME_ADDR1_47_32 GENMASK(15, 0) +#define HAL_TX_PPDU_SETUP_INFO2_PROT_FRAME_ADDR2_15_0 GENMASK(31, 16) +#define HAL_TX_PPDU_SETUP_INFO3_PROT_FRAME_ADDR2_47_16 GENMASK(31, 0) +#define HAL_TX_PPDU_SETUP_INFO4_PROT_FRAME_ADDR3_31_0 GENMASK(31, 0) +#define HAL_TX_PPDU_SETUP_INFO5_PROT_FRAME_ADDR3_47_32 GENMASK(15, 0) +#define HAL_TX_PPDU_SETUP_INFO5_PROT_FRAME_ADDR4_15_0 GENMASK(31, 16) +#define HAL_TX_PPDU_SETUP_INFO6_PROT_FRAME_ADDR4_47_16 GENMASK(31, 0) + +struct hal_tx_pcu_ppdu_setup_init { + __le32 info0; + __le32 info1; + __le32 info2; + __le32 info3; + __le32 reserved; + __le32 info4; + __le32 info5; + __le32 info6; +} __packed; + +#define HAL_TX_FES_STATUS_END_INFO0_START_TIMESTAMP_15_0 GENMASK(15, 0) +#define HAL_TX_FES_STATUS_END_INFO0_START_TIMESTAMP_31_16 GENMASK(31, 16) + +struct hal_tx_fes_status_end { + __le32 reserved[2]; + __le32 info0; + __le32 reserved1[19]; +} __packed; + +#define HAL_TX_BANK_CONFIG_EPD BIT(0) +#define HAL_TX_BANK_CONFIG_ENCAP_TYPE GENMASK(2, 1) +#define HAL_TX_BANK_CONFIG_ENCRYPT_TYPE GENMASK(6, 3) +#define HAL_TX_BANK_CONFIG_SRC_BUFFER_SWAP BIT(7) +#define HAL_TX_BANK_CONFIG_LINK_META_SWAP BIT(8) +#define HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN BIT(9) +#define HAL_TX_BANK_CONFIG_ADDRX_EN BIT(10) +#define HAL_TX_BANK_CONFIG_ADDRY_EN BIT(11) +#define HAL_TX_BANK_CONFIG_MESH_EN GENMASK(13, 12) +#define HAL_TX_BANK_CONFIG_VDEV_ID_CHECK_EN BIT(14) +#define HAL_TX_BANK_CONFIG_PMAC_ID GENMASK(16, 15) +/* STA mode will have MCAST_PKT_CTRL instead of DSCP_TID_MAP bitfield */ +#define HAL_TX_BANK_CONFIG_DSCP_TIP_MAP_ID GENMASK(22, 17) + +void ath12k_hal_tx_cmd_desc_setup(struct ath12k_base *ab, + struct hal_tcl_data_cmd *tcl_cmd, + struct hal_tx_info *ti); +void ath12k_hal_tx_set_dscp_tid_map(struct ath12k_base *ab, int id); +int ath12k_hal_reo_cmd_send(struct ath12k_base *ab, struct hal_srng *srng, + enum hal_reo_cmd_type type, + struct ath12k_hal_reo_cmd *cmd); +void ath12k_hal_tx_configure_bank_register(struct ath12k_base *ab, u32 bank_config, + u8 bank_id); +#endif diff --git a/drivers/net/wireless/ath/ath12k/hif.h b/drivers/net/wireless/ath/ath12k/hif.h new file mode 100644 index 000000000000..54490cdb63a1 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/hif.h @@ -0,0 +1,144 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef ATH12K_HIF_H +#define ATH12K_HIF_H + +#include "core.h" + +struct ath12k_hif_ops { + u32 (*read32)(struct ath12k_base *sc, u32 address); + void (*write32)(struct ath12k_base *sc, u32 address, u32 data); + void (*irq_enable)(struct ath12k_base *sc); + void (*irq_disable)(struct ath12k_base *sc); + int (*start)(struct ath12k_base *sc); + void (*stop)(struct ath12k_base *sc); + int (*power_up)(struct ath12k_base *sc); + void (*power_down)(struct ath12k_base *sc); + int (*suspend)(struct ath12k_base *ab); + int (*resume)(struct ath12k_base *ab); + int (*map_service_to_pipe)(struct ath12k_base *sc, u16 service_id, + u8 *ul_pipe, u8 *dl_pipe); + int (*get_user_msi_vector)(struct ath12k_base *ab, char *user_name, + int *num_vectors, u32 *user_base_data, + u32 *base_vector); + void (*get_msi_address)(struct ath12k_base *ab, u32 *msi_addr_lo, + u32 *msi_addr_hi); + void (*ce_irq_enable)(struct ath12k_base *ab); + void (*ce_irq_disable)(struct ath12k_base *ab); + void (*get_ce_msi_idx)(struct ath12k_base *ab, u32 ce_id, u32 *msi_idx); +}; + +static inline int ath12k_hif_map_service_to_pipe(struct ath12k_base *ab, u16 service_id, + u8 *ul_pipe, u8 *dl_pipe) +{ + return ab->hif.ops->map_service_to_pipe(ab, service_id, + ul_pipe, dl_pipe); +} + +static inline int ath12k_hif_get_user_msi_vector(struct ath12k_base *ab, + char *user_name, + int *num_vectors, + u32 *user_base_data, + u32 *base_vector) +{ + if (!ab->hif.ops->get_user_msi_vector) + return -EOPNOTSUPP; + + return ab->hif.ops->get_user_msi_vector(ab, user_name, num_vectors, + user_base_data, + base_vector); +} + +static inline void ath12k_hif_get_msi_address(struct ath12k_base *ab, + u32 *msi_addr_lo, + u32 *msi_addr_hi) +{ + if (!ab->hif.ops->get_msi_address) + return; + + ab->hif.ops->get_msi_address(ab, msi_addr_lo, msi_addr_hi); +} + +static inline void ath12k_hif_get_ce_msi_idx(struct ath12k_base *ab, u32 ce_id, + u32 *msi_data_idx) +{ + if (ab->hif.ops->get_ce_msi_idx) + ab->hif.ops->get_ce_msi_idx(ab, ce_id, msi_data_idx); + else + *msi_data_idx = ce_id; +} + +static inline void ath12k_hif_ce_irq_enable(struct ath12k_base *ab) +{ + if (ab->hif.ops->ce_irq_enable) + ab->hif.ops->ce_irq_enable(ab); +} + +static inline void ath12k_hif_ce_irq_disable(struct ath12k_base *ab) +{ + if (ab->hif.ops->ce_irq_disable) + ab->hif.ops->ce_irq_disable(ab); +} + +static inline void ath12k_hif_irq_enable(struct ath12k_base *ab) +{ + ab->hif.ops->irq_enable(ab); +} + +static inline void ath12k_hif_irq_disable(struct ath12k_base *ab) +{ + ab->hif.ops->irq_disable(ab); +} + +static inline int ath12k_hif_suspend(struct ath12k_base *ab) +{ + if (ab->hif.ops->suspend) + return ab->hif.ops->suspend(ab); + + return 0; +} + +static inline int ath12k_hif_resume(struct ath12k_base *ab) +{ + if (ab->hif.ops->resume) + return ab->hif.ops->resume(ab); + + return 0; +} + +static inline int ath12k_hif_start(struct ath12k_base *ab) +{ + return ab->hif.ops->start(ab); +} + +static inline void ath12k_hif_stop(struct ath12k_base *ab) +{ + ab->hif.ops->stop(ab); +} + +static inline u32 ath12k_hif_read32(struct ath12k_base *ab, u32 address) +{ + return ab->hif.ops->read32(ab, address); +} + +static inline void ath12k_hif_write32(struct ath12k_base *ab, u32 address, + u32 data) +{ + ab->hif.ops->write32(ab, address, data); +} + +static inline int ath12k_hif_power_up(struct ath12k_base *ab) +{ + return ab->hif.ops->power_up(ab); +} + +static inline void ath12k_hif_power_down(struct ath12k_base *ab) +{ + ab->hif.ops->power_down(ab); +} + +#endif /* ATH12K_HIF_H 
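+ */
+
+/* Usage sketch (illustrative, not part of this patch): a bus backend
+ * fills in struct ath12k_hif_ops and the core then drives it through
+ * the inline wrappers above. The "example" names are hypothetical and
+ * only read32/write32 are shown, assuming the usual ab->mem I/O
+ * mapping:
+ *
+ *	static u32 ath12k_example_read32(struct ath12k_base *ab,
+ *					 u32 offset)
+ *	{
+ *		return ioread32(ab->mem + offset);
+ *	}
+ *
+ *	static void ath12k_example_write32(struct ath12k_base *ab,
+ *					   u32 offset, u32 value)
+ *	{
+ *		iowrite32(value, ab->mem + offset);
+ *	}
+ *
+ *	static const struct ath12k_hif_ops ath12k_example_hif_ops = {
+ *		.read32 = ath12k_example_read32,
+ *		.write32 = ath12k_example_write32,
+ *		// remaining mandatory ops elided for brevity
+ *	};
+ *
+ * read32, write32, irq_enable, irq_disable, start, stop, power_up,
+ * power_down and map_service_to_pipe are dispatched without NULL
+ * checks, so a backend must provide them; the remaining ops are
+ * optional and the wrappers above fall back safely when they are NULL.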
*/ diff --git a/drivers/net/wireless/ath/ath12k/htc.c b/drivers/net/wireless/ath/ath12k/htc.c new file mode 100644 index 000000000000..23f7428abd95 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/htc.c @@ -0,0 +1,789 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ +#include <linux/skbuff.h> +#include <linux/ctype.h> + +#include "debug.h" +#include "hif.h" + +struct sk_buff *ath12k_htc_alloc_skb(struct ath12k_base *ab, int size) +{ + struct sk_buff *skb; + + skb = dev_alloc_skb(size + sizeof(struct ath12k_htc_hdr)); + if (!skb) + return NULL; + + skb_reserve(skb, sizeof(struct ath12k_htc_hdr)); + + /* FW/HTC requires 4-byte aligned streams */ + if (!IS_ALIGNED((unsigned long)skb->data, 4)) + ath12k_warn(ab, "Unaligned HTC tx skb\n"); + + return skb; +} + +static void ath12k_htc_control_tx_complete(struct ath12k_base *ab, + struct sk_buff *skb) +{ + kfree_skb(skb); +} + +static struct sk_buff *ath12k_htc_build_tx_ctrl_skb(void) +{ + struct sk_buff *skb; + struct ath12k_skb_cb *skb_cb; + + skb = dev_alloc_skb(ATH12K_HTC_CONTROL_BUFFER_SIZE); + if (!skb) + return NULL; + + skb_reserve(skb, sizeof(struct ath12k_htc_hdr)); + WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4)); + + skb_cb = ATH12K_SKB_CB(skb); + memset(skb_cb, 0, sizeof(*skb_cb)); + + return skb; +} + +static void ath12k_htc_prepare_tx_skb(struct ath12k_htc_ep *ep, + struct sk_buff *skb) +{ + struct ath12k_htc_hdr *hdr; + + hdr = (struct ath12k_htc_hdr *)skb->data; + + memset(hdr, 0, sizeof(*hdr)); + hdr->htc_info = le32_encode_bits(ep->eid, HTC_HDR_ENDPOINTID) | + le32_encode_bits((skb->len - sizeof(*hdr)), + HTC_HDR_PAYLOADLEN); + + if (ep->tx_credit_flow_enabled) + hdr->htc_info |= le32_encode_bits(ATH12K_HTC_FLAG_NEED_CREDIT_UPDATE, + HTC_HDR_FLAGS); + + spin_lock_bh(&ep->htc->tx_lock); + hdr->ctrl_info = le32_encode_bits(ep->seq_no++, HTC_HDR_CONTROLBYTES1); + spin_unlock_bh(&ep->htc->tx_lock); +} + +int ath12k_htc_send(struct ath12k_htc *htc, + enum ath12k_htc_ep_id eid, + struct sk_buff *skb) +{ + struct ath12k_htc_ep *ep = &htc->endpoint[eid]; + struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb); + struct device *dev = htc->ab->dev; + struct ath12k_base *ab = htc->ab; + int credits = 0; + int ret; + + if (eid >= ATH12K_HTC_EP_COUNT) { + ath12k_warn(ab, "Invalid endpoint id: %d\n", eid); + return -ENOENT; + } + + skb_push(skb, sizeof(struct ath12k_htc_hdr)); + + if (ep->tx_credit_flow_enabled) { + credits = DIV_ROUND_UP(skb->len, htc->target_credit_size); + spin_lock_bh(&htc->tx_lock); + if (ep->tx_credits < credits) { + ath12k_dbg(ab, ATH12K_DBG_HTC, + "htc insufficient credits ep %d required %d available %d\n", + eid, credits, ep->tx_credits); + spin_unlock_bh(&htc->tx_lock); + ret = -EAGAIN; + goto err_pull; + } + ep->tx_credits -= credits; + ath12k_dbg(ab, ATH12K_DBG_HTC, + "htc ep %d consumed %d credits (total %d)\n", + eid, credits, ep->tx_credits); + spin_unlock_bh(&htc->tx_lock); + } + + ath12k_htc_prepare_tx_skb(ep, skb); + + skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE); + ret = dma_mapping_error(dev, skb_cb->paddr); + if (ret) { + ret = -EIO; + goto err_credits; + } + + ret = ath12k_ce_send(htc->ab, skb, ep->ul_pipe_id, ep->eid); + if (ret) + goto err_unmap; + + return 0; + +err_unmap: + dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE); +err_credits: + if (ep->tx_credit_flow_enabled) { + 
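+		/* Return the credits reserved earlier in this function
+		 * and, as in the credit-report path, notify the endpoint
+		 * so queued traffic can retry with the restored budget.
+		 */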
spin_lock_bh(&htc->tx_lock); + ep->tx_credits += credits; + ath12k_dbg(ab, ATH12K_DBG_HTC, + "htc ep %d reverted %d credits back (total %d)\n", + eid, credits, ep->tx_credits); + spin_unlock_bh(&htc->tx_lock); + + if (ep->ep_ops.ep_tx_credits) + ep->ep_ops.ep_tx_credits(htc->ab); + } +err_pull: + skb_pull(skb, sizeof(struct ath12k_htc_hdr)); + return ret; +} + +static void +ath12k_htc_process_credit_report(struct ath12k_htc *htc, + const struct ath12k_htc_credit_report *report, + int len, + enum ath12k_htc_ep_id eid) +{ + struct ath12k_base *ab = htc->ab; + struct ath12k_htc_ep *ep; + int i, n_reports; + + if (len % sizeof(*report)) + ath12k_warn(ab, "Uneven credit report len %d", len); + + n_reports = len / sizeof(*report); + + spin_lock_bh(&htc->tx_lock); + for (i = 0; i < n_reports; i++, report++) { + if (report->eid >= ATH12K_HTC_EP_COUNT) + break; + + ep = &htc->endpoint[report->eid]; + ep->tx_credits += report->credits; + + ath12k_dbg(ab, ATH12K_DBG_HTC, "htc ep %d got %d credits (total %d)\n", + report->eid, report->credits, ep->tx_credits); + + if (ep->ep_ops.ep_tx_credits) { + spin_unlock_bh(&htc->tx_lock); + ep->ep_ops.ep_tx_credits(htc->ab); + spin_lock_bh(&htc->tx_lock); + } + } + spin_unlock_bh(&htc->tx_lock); +} + +static int ath12k_htc_process_trailer(struct ath12k_htc *htc, + u8 *buffer, + int length, + enum ath12k_htc_ep_id src_eid) +{ + struct ath12k_base *ab = htc->ab; + int status = 0; + struct ath12k_htc_record *record; + size_t len; + + while (length > 0) { + record = (struct ath12k_htc_record *)buffer; + + if (length < sizeof(record->hdr)) { + status = -EINVAL; + break; + } + + if (record->hdr.len > length) { + /* no room left in buffer for record */ + ath12k_warn(ab, "Invalid record length: %d\n", + record->hdr.len); + status = -EINVAL; + break; + } + + switch (record->hdr.id) { + case ATH12K_HTC_RECORD_CREDITS: + len = sizeof(struct ath12k_htc_credit_report); + if (record->hdr.len < len) { + ath12k_warn(ab, "Credit report too long\n"); + status = -EINVAL; + break; + } + ath12k_htc_process_credit_report(htc, + record->credit_report, + record->hdr.len, + src_eid); + break; + default: + ath12k_warn(ab, "Unhandled record: id:%d length:%d\n", + record->hdr.id, record->hdr.len); + break; + } + + if (status) + break; + + /* multiple records may be present in a trailer */ + buffer += sizeof(record->hdr) + record->hdr.len; + length -= sizeof(record->hdr) + record->hdr.len; + } + + return status; +} + +static void ath12k_htc_suspend_complete(struct ath12k_base *ab, bool ack) +{ + ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot suspend complete %d\n", ack); + + if (ack) + set_bit(ATH12K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags); + else + clear_bit(ATH12K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags); + + complete(&ab->htc_suspend); +} + +void ath12k_htc_rx_completion_handler(struct ath12k_base *ab, + struct sk_buff *skb) +{ + int status = 0; + struct ath12k_htc *htc = &ab->htc; + struct ath12k_htc_hdr *hdr; + struct ath12k_htc_ep *ep; + u16 payload_len; + u32 trailer_len = 0; + size_t min_len; + u8 eid; + bool trailer_present; + + hdr = (struct ath12k_htc_hdr *)skb->data; + skb_pull(skb, sizeof(*hdr)); + + eid = le32_get_bits(hdr->htc_info, HTC_HDR_ENDPOINTID); + + if (eid >= ATH12K_HTC_EP_COUNT) { + ath12k_warn(ab, "HTC Rx: invalid eid %d\n", eid); + goto out; + } + + ep = &htc->endpoint[eid]; + + payload_len = le32_get_bits(hdr->htc_info, HTC_HDR_PAYLOADLEN); + + if (payload_len + sizeof(*hdr) > ATH12K_HTC_MAX_LEN) { + ath12k_warn(ab, "HTC rx frame too long, len: %zu\n", + payload_len 
+ sizeof(*hdr)); + goto out; + } + + if (skb->len < payload_len) { + ath12k_warn(ab, "HTC Rx: insufficient length, got %d, expected %d\n", + skb->len, payload_len); + goto out; + } + + /* get flags to check for trailer */ + trailer_present = le32_get_bits(hdr->htc_info, HTC_HDR_FLAGS) & + ATH12K_HTC_FLAG_TRAILER_PRESENT; + + if (trailer_present) { + u8 *trailer; + + trailer_len = le32_get_bits(hdr->ctrl_info, + HTC_HDR_CONTROLBYTES0); + min_len = sizeof(struct ath12k_htc_record_hdr); + + if ((trailer_len < min_len) || + (trailer_len > payload_len)) { + ath12k_warn(ab, "Invalid trailer length: %d\n", + trailer_len); + goto out; + } + + trailer = (u8 *)hdr; + trailer += sizeof(*hdr); + trailer += payload_len; + trailer -= trailer_len; + status = ath12k_htc_process_trailer(htc, trailer, + trailer_len, eid); + if (status) + goto out; + + skb_trim(skb, skb->len - trailer_len); + } + + if (trailer_len >= payload_len) + /* zero length packet with trailer data, just drop these */ + goto out; + + if (eid == ATH12K_HTC_EP_0) { + struct ath12k_htc_msg *msg = (struct ath12k_htc_msg *)skb->data; + + switch (le32_get_bits(msg->msg_svc_id, HTC_MSG_MESSAGEID)) { + case ATH12K_HTC_MSG_READY_ID: + case ATH12K_HTC_MSG_CONNECT_SERVICE_RESP_ID: + /* handle HTC control message */ + if (completion_done(&htc->ctl_resp)) { + /* this is a fatal error, target should not be + * sending unsolicited messages on the ep 0 + */ + ath12k_warn(ab, "HTC rx ctrl still processing\n"); + complete(&htc->ctl_resp); + goto out; + } + + htc->control_resp_len = + min_t(int, skb->len, + ATH12K_HTC_MAX_CTRL_MSG_LEN); + + memcpy(htc->control_resp_buffer, skb->data, + htc->control_resp_len); + + complete(&htc->ctl_resp); + break; + case ATH12K_HTC_MSG_SEND_SUSPEND_COMPLETE: + ath12k_htc_suspend_complete(ab, true); + break; + case ATH12K_HTC_MSG_NACK_SUSPEND: + ath12k_htc_suspend_complete(ab, false); + break; + case ATH12K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID: + break; + default: + ath12k_warn(ab, "ignoring unsolicited htc ep0 event %u\n", + le32_get_bits(msg->msg_svc_id, HTC_MSG_MESSAGEID)); + break; + } + goto out; + } + + ath12k_dbg(ab, ATH12K_DBG_HTC, "htc rx completion ep %d skb %pK\n", + eid, skb); + ep->ep_ops.ep_rx_complete(ab, skb); + + /* poll tx completion for interrupt disabled CE's */ + ath12k_ce_poll_send_completed(ab, ep->ul_pipe_id); + + /* skb is now owned by the rx completion handler */ + skb = NULL; +out: + kfree_skb(skb); +} + +static void ath12k_htc_control_rx_complete(struct ath12k_base *ab, + struct sk_buff *skb) +{ + /* This is unexpected. FW is not supposed to send regular rx on this + * endpoint. 
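+	 * All EP0 traffic is consumed inline by
+	 * ath12k_htc_rx_completion_handler(), so reaching this callback
+	 * means the target sent something it should not have.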
+ */ + ath12k_warn(ab, "unexpected htc rx\n"); + kfree_skb(skb); +} + +static const char *htc_service_name(enum ath12k_htc_svc_id id) +{ + switch (id) { + case ATH12K_HTC_SVC_ID_RESERVED: + return "Reserved"; + case ATH12K_HTC_SVC_ID_RSVD_CTRL: + return "Control"; + case ATH12K_HTC_SVC_ID_WMI_CONTROL: + return "WMI"; + case ATH12K_HTC_SVC_ID_WMI_DATA_BE: + return "DATA BE"; + case ATH12K_HTC_SVC_ID_WMI_DATA_BK: + return "DATA BK"; + case ATH12K_HTC_SVC_ID_WMI_DATA_VI: + return "DATA VI"; + case ATH12K_HTC_SVC_ID_WMI_DATA_VO: + return "DATA VO"; + case ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1: + return "WMI MAC1"; + case ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2: + return "WMI MAC2"; + case ATH12K_HTC_SVC_ID_NMI_CONTROL: + return "NMI Control"; + case ATH12K_HTC_SVC_ID_NMI_DATA: + return "NMI Data"; + case ATH12K_HTC_SVC_ID_HTT_DATA_MSG: + return "HTT Data"; + case ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS: + return "RAW"; + case ATH12K_HTC_SVC_ID_IPA_TX: + return "IPA TX"; + case ATH12K_HTC_SVC_ID_PKT_LOG: + return "PKT LOG"; + case ATH12K_HTC_SVC_ID_WMI_CONTROL_DIAG: + return "WMI DIAG"; + } + + return "Unknown"; +} + +static void ath12k_htc_reset_endpoint_states(struct ath12k_htc *htc) +{ + struct ath12k_htc_ep *ep; + int i; + + for (i = ATH12K_HTC_EP_0; i < ATH12K_HTC_EP_COUNT; i++) { + ep = &htc->endpoint[i]; + ep->service_id = ATH12K_HTC_SVC_ID_UNUSED; + ep->max_ep_message_len = 0; + ep->max_tx_queue_depth = 0; + ep->eid = i; + ep->htc = htc; + ep->tx_credit_flow_enabled = true; + } +} + +static u8 ath12k_htc_get_credit_allocation(struct ath12k_htc *htc, + u16 service_id) +{ + struct ath12k_htc_svc_tx_credits *serv_entry; + u8 i, allocation = 0; + + serv_entry = htc->service_alloc_table; + + for (i = 0; i < ATH12K_HTC_MAX_SERVICE_ALLOC_ENTRIES; i++) { + if (serv_entry[i].service_id == service_id) { + allocation = serv_entry[i].credit_allocation; + break; + } + } + + return allocation; +} + +static int ath12k_htc_setup_target_buffer_assignments(struct ath12k_htc *htc) +{ + struct ath12k_htc_svc_tx_credits *serv_entry; + static const u32 svc_id[] = { + ATH12K_HTC_SVC_ID_WMI_CONTROL, + ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1, + ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2, + }; + int i, credits; + + credits = htc->total_transmit_credits; + serv_entry = htc->service_alloc_table; + + if ((htc->wmi_ep_count == 0) || + (htc->wmi_ep_count > ARRAY_SIZE(svc_id))) + return -EINVAL; + + /* Divide credits among number of endpoints for WMI */ + credits = credits / htc->wmi_ep_count; + for (i = 0; i < htc->wmi_ep_count; i++) { + serv_entry[i].service_id = svc_id[i]; + serv_entry[i].credit_allocation = credits; + } + + return 0; +} + +int ath12k_htc_wait_target(struct ath12k_htc *htc) +{ + int i, status = 0; + struct ath12k_base *ab = htc->ab; + unsigned long time_left; + struct ath12k_htc_ready *ready; + u16 message_id; + u16 credit_count; + u16 credit_size; + + time_left = wait_for_completion_timeout(&htc->ctl_resp, + ATH12K_HTC_WAIT_TIMEOUT_HZ); + if (!time_left) { + ath12k_warn(ab, "failed to receive control response completion, polling..\n"); + + for (i = 0; i < ab->hw_params->ce_count; i++) + ath12k_ce_per_engine_service(htc->ab, i); + + time_left = + wait_for_completion_timeout(&htc->ctl_resp, + ATH12K_HTC_WAIT_TIMEOUT_HZ); + + if (!time_left) + status = -ETIMEDOUT; + } + + if (status < 0) { + ath12k_warn(ab, "ctl_resp never came in (%d)\n", status); + return status; + } + + if (htc->control_resp_len < sizeof(*ready)) { + ath12k_warn(ab, "Invalid HTC ready msg len:%d\n", + htc->control_resp_len); + return -ECOMM; + } + + ready 
= (struct ath12k_htc_ready *)htc->control_resp_buffer; + message_id = le32_get_bits(ready->id_credit_count, HTC_MSG_MESSAGEID); + credit_count = le32_get_bits(ready->id_credit_count, + HTC_READY_MSG_CREDITCOUNT); + credit_size = le32_get_bits(ready->size_ep, HTC_READY_MSG_CREDITSIZE); + + if (message_id != ATH12K_HTC_MSG_READY_ID) { + ath12k_warn(ab, "Invalid HTC ready msg: 0x%x\n", message_id); + return -ECOMM; + } + + htc->total_transmit_credits = credit_count; + htc->target_credit_size = credit_size; + + ath12k_dbg(ab, ATH12K_DBG_HTC, + "Target ready! transmit resources: %d size:%d\n", + htc->total_transmit_credits, htc->target_credit_size); + + if ((htc->total_transmit_credits == 0) || + (htc->target_credit_size == 0)) { + ath12k_warn(ab, "Invalid credit size received\n"); + return -ECOMM; + } + + ath12k_htc_setup_target_buffer_assignments(htc); + + return 0; +} + +int ath12k_htc_connect_service(struct ath12k_htc *htc, + struct ath12k_htc_svc_conn_req *conn_req, + struct ath12k_htc_svc_conn_resp *conn_resp) +{ + struct ath12k_base *ab = htc->ab; + struct ath12k_htc_conn_svc *req_msg; + struct ath12k_htc_conn_svc_resp resp_msg_dummy; + struct ath12k_htc_conn_svc_resp *resp_msg = &resp_msg_dummy; + enum ath12k_htc_ep_id assigned_eid = ATH12K_HTC_EP_COUNT; + struct ath12k_htc_ep *ep; + struct sk_buff *skb; + unsigned int max_msg_size = 0; + int length, status; + unsigned long time_left; + bool disable_credit_flow_ctrl = false; + u16 message_id, service_id, flags = 0; + u8 tx_alloc = 0; + + /* special case for HTC pseudo control service */ + if (conn_req->service_id == ATH12K_HTC_SVC_ID_RSVD_CTRL) { + disable_credit_flow_ctrl = true; + assigned_eid = ATH12K_HTC_EP_0; + max_msg_size = ATH12K_HTC_MAX_CTRL_MSG_LEN; + memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy)); + goto setup; + } + + tx_alloc = ath12k_htc_get_credit_allocation(htc, + conn_req->service_id); + if (!tx_alloc) + ath12k_dbg(ab, ATH12K_DBG_BOOT, + "boot htc service %s does not allocate target credits\n", + htc_service_name(conn_req->service_id)); + + skb = ath12k_htc_build_tx_ctrl_skb(); + if (!skb) { + ath12k_warn(ab, "Failed to allocate HTC packet\n"); + return -ENOMEM; + } + + length = sizeof(*req_msg); + skb_put(skb, length); + memset(skb->data, 0, length); + + req_msg = (struct ath12k_htc_conn_svc *)skb->data; + req_msg->msg_svc_id = le32_encode_bits(ATH12K_HTC_MSG_CONNECT_SERVICE_ID, + HTC_MSG_MESSAGEID); + + flags |= u32_encode_bits(tx_alloc, ATH12K_HTC_CONN_FLAGS_RECV_ALLOC); + + /* Only enable credit flow control for WMI ctrl service */ + if (!(conn_req->service_id == ATH12K_HTC_SVC_ID_WMI_CONTROL || + conn_req->service_id == ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1 || + conn_req->service_id == ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2)) { + flags |= ATH12K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL; + disable_credit_flow_ctrl = true; + } + + req_msg->flags_len = le32_encode_bits(flags, HTC_SVC_MSG_CONNECTIONFLAGS); + req_msg->msg_svc_id |= le32_encode_bits(conn_req->service_id, + HTC_SVC_MSG_SERVICE_ID); + + reinit_completion(&htc->ctl_resp); + + status = ath12k_htc_send(htc, ATH12K_HTC_EP_0, skb); + if (status) { + kfree_skb(skb); + return status; + } + + /* wait for response */ + time_left = wait_for_completion_timeout(&htc->ctl_resp, + ATH12K_HTC_CONN_SVC_TIMEOUT_HZ); + if (!time_left) { + ath12k_err(ab, "Service connect timeout\n"); + return -ETIMEDOUT; + } + + /* we controlled the buffer creation, it's aligned */ + resp_msg = (struct ath12k_htc_conn_svc_resp *)htc->control_resp_buffer; + message_id = 
le32_get_bits(resp_msg->msg_svc_id, HTC_MSG_MESSAGEID); + service_id = le32_get_bits(resp_msg->msg_svc_id, + HTC_SVC_RESP_MSG_SERVICEID); + + if ((message_id != ATH12K_HTC_MSG_CONNECT_SERVICE_RESP_ID) || + (htc->control_resp_len < sizeof(*resp_msg))) { + ath12k_err(ab, "Invalid resp message ID 0x%x", message_id); + return -EPROTO; + } + + ath12k_dbg(ab, ATH12K_DBG_HTC, + "HTC Service %s connect response: status: %u, assigned ep: %u\n", + htc_service_name(service_id), + le32_get_bits(resp_msg->flags_len, HTC_SVC_RESP_MSG_STATUS), + le32_get_bits(resp_msg->flags_len, HTC_SVC_RESP_MSG_ENDPOINTID)); + + conn_resp->connect_resp_code = le32_get_bits(resp_msg->flags_len, + HTC_SVC_RESP_MSG_STATUS); + + /* check response status */ + if (conn_resp->connect_resp_code != ATH12K_HTC_CONN_SVC_STATUS_SUCCESS) { + ath12k_err(ab, "HTC Service %s connect request failed: 0x%x)\n", + htc_service_name(service_id), + conn_resp->connect_resp_code); + return -EPROTO; + } + + assigned_eid = le32_get_bits(resp_msg->flags_len, + HTC_SVC_RESP_MSG_ENDPOINTID); + + max_msg_size = le32_get_bits(resp_msg->flags_len, + HTC_SVC_RESP_MSG_MAXMSGSIZE); + +setup: + + if (assigned_eid >= ATH12K_HTC_EP_COUNT) + return -EPROTO; + + if (max_msg_size == 0) + return -EPROTO; + + ep = &htc->endpoint[assigned_eid]; + ep->eid = assigned_eid; + + if (ep->service_id != ATH12K_HTC_SVC_ID_UNUSED) + return -EPROTO; + + /* return assigned endpoint to caller */ + conn_resp->eid = assigned_eid; + conn_resp->max_msg_len = le32_get_bits(resp_msg->flags_len, + HTC_SVC_RESP_MSG_MAXMSGSIZE); + + /* setup the endpoint */ + ep->service_id = conn_req->service_id; + ep->max_tx_queue_depth = conn_req->max_send_queue_depth; + ep->max_ep_message_len = le32_get_bits(resp_msg->flags_len, + HTC_SVC_RESP_MSG_MAXMSGSIZE); + ep->tx_credits = tx_alloc; + + /* copy all the callbacks */ + ep->ep_ops = conn_req->ep_ops; + + status = ath12k_hif_map_service_to_pipe(htc->ab, + ep->service_id, + &ep->ul_pipe_id, + &ep->dl_pipe_id); + if (status) + return status; + + ath12k_dbg(ab, ATH12K_DBG_BOOT, + "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n", + htc_service_name(ep->service_id), ep->ul_pipe_id, + ep->dl_pipe_id, ep->eid); + + if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) { + ep->tx_credit_flow_enabled = false; + ath12k_dbg(ab, ATH12K_DBG_BOOT, + "boot htc service '%s' eid %d TX flow control disabled\n", + htc_service_name(ep->service_id), assigned_eid); + } + + return status; +} + +int ath12k_htc_start(struct ath12k_htc *htc) +{ + struct sk_buff *skb; + int status; + struct ath12k_base *ab = htc->ab; + struct ath12k_htc_setup_complete_extended *msg; + + skb = ath12k_htc_build_tx_ctrl_skb(); + if (!skb) + return -ENOMEM; + + skb_put(skb, sizeof(*msg)); + memset(skb->data, 0, skb->len); + + msg = (struct ath12k_htc_setup_complete_extended *)skb->data; + msg->msg_id = le32_encode_bits(ATH12K_HTC_MSG_SETUP_COMPLETE_EX_ID, + HTC_MSG_MESSAGEID); + + ath12k_dbg(ab, ATH12K_DBG_HTC, "HTC is using TX credit flow control\n"); + + status = ath12k_htc_send(htc, ATH12K_HTC_EP_0, skb); + if (status) { + kfree_skb(skb); + return status; + } + + return 0; +} + +int ath12k_htc_init(struct ath12k_base *ab) +{ + struct ath12k_htc *htc = &ab->htc; + struct ath12k_htc_svc_conn_req conn_req = { }; + struct ath12k_htc_svc_conn_resp conn_resp = { }; + int ret; + + spin_lock_init(&htc->tx_lock); + + ath12k_htc_reset_endpoint_states(htc); + + htc->ab = ab; + + switch (ab->wmi_ab.preferred_hw_mode) { + case WMI_HOST_HW_MODE_SINGLE: + htc->wmi_ep_count = 1; + break; 
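+	/* One WMI control endpoint is needed per concurrent MAC:
+	 * single mode uses one, DBS two, DBS+SBS three; unrecognized
+	 * modes fall back to the radio count from hw_params below.
+	 */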
+ case WMI_HOST_HW_MODE_DBS: + case WMI_HOST_HW_MODE_DBS_OR_SBS: + htc->wmi_ep_count = 2; + break; + case WMI_HOST_HW_MODE_DBS_SBS: + htc->wmi_ep_count = 3; + break; + default: + htc->wmi_ep_count = ab->hw_params->max_radios; + break; + } + + /* setup our pseudo HTC control endpoint connection */ + conn_req.ep_ops.ep_tx_complete = ath12k_htc_control_tx_complete; + conn_req.ep_ops.ep_rx_complete = ath12k_htc_control_rx_complete; + conn_req.max_send_queue_depth = ATH12K_NUM_CONTROL_TX_BUFFERS; + conn_req.service_id = ATH12K_HTC_SVC_ID_RSVD_CTRL; + + /* connect fake service */ + ret = ath12k_htc_connect_service(htc, &conn_req, &conn_resp); + if (ret) { + ath12k_err(ab, "could not connect to htc service (%d)\n", ret); + return ret; + } + + init_completion(&htc->ctl_resp); + + return 0; +} diff --git a/drivers/net/wireless/ath/ath12k/htc.h b/drivers/net/wireless/ath/ath12k/htc.h new file mode 100644 index 000000000000..7e3dccc7cc14 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/htc.h @@ -0,0 +1,316 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef ATH12K_HTC_H +#define ATH12K_HTC_H + +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/bug.h> +#include <linux/skbuff.h> +#include <linux/timer.h> + +struct ath12k_base; + +#define HTC_HDR_ENDPOINTID GENMASK(7, 0) +#define HTC_HDR_FLAGS GENMASK(15, 8) +#define HTC_HDR_PAYLOADLEN GENMASK(31, 16) +#define HTC_HDR_CONTROLBYTES0 GENMASK(7, 0) +#define HTC_HDR_CONTROLBYTES1 GENMASK(15, 8) +#define HTC_HDR_RESERVED GENMASK(31, 16) + +#define HTC_SVC_MSG_SERVICE_ID GENMASK(31, 16) +#define HTC_SVC_MSG_CONNECTIONFLAGS GENMASK(15, 0) +#define HTC_SVC_MSG_SERVICEMETALENGTH GENMASK(23, 16) +#define HTC_READY_MSG_CREDITCOUNT GENMASK(31, 16) +#define HTC_READY_MSG_CREDITSIZE GENMASK(15, 0) +#define HTC_READY_MSG_MAXENDPOINTS GENMASK(23, 16) + +#define HTC_READY_EX_MSG_HTCVERSION GENMASK(7, 0) +#define HTC_READY_EX_MSG_MAXMSGSPERHTCBUNDLE GENMASK(15, 8) + +#define HTC_SVC_RESP_MSG_SERVICEID GENMASK(31, 16) +#define HTC_SVC_RESP_MSG_STATUS GENMASK(7, 0) +#define HTC_SVC_RESP_MSG_ENDPOINTID GENMASK(15, 8) +#define HTC_SVC_RESP_MSG_MAXMSGSIZE GENMASK(31, 16) +#define HTC_SVC_RESP_MSG_SERVICEMETALENGTH GENMASK(7, 0) + +#define HTC_MSG_MESSAGEID GENMASK(15, 0) +#define HTC_SETUP_COMPLETE_EX_MSG_SETUPFLAGS GENMASK(31, 0) +#define HTC_SETUP_COMPLETE_EX_MSG_MAXMSGSPERBUNDLEDRECV GENMASK(7, 0) +#define HTC_SETUP_COMPLETE_EX_MSG_RSVD0 GENMASK(15, 8) +#define HTC_SETUP_COMPLETE_EX_MSG_RSVD1 GENMASK(23, 16) +#define HTC_SETUP_COMPLETE_EX_MSG_RSVD2 GENMASK(31, 24) + +enum ath12k_htc_tx_flags { + ATH12K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01, + ATH12K_HTC_FLAG_SEND_BUNDLE = 0x02 +}; + +enum ath12k_htc_rx_flags { + ATH12K_HTC_FLAG_TRAILER_PRESENT = 0x02, + ATH12K_HTC_FLAG_BUNDLE_MASK = 0xF0 +}; + +struct ath12k_htc_hdr { + __le32 htc_info; + __le32 ctrl_info; +} __packed __aligned(4); + +enum ath12k_htc_msg_id { + ATH12K_HTC_MSG_READY_ID = 1, + ATH12K_HTC_MSG_CONNECT_SERVICE_ID = 2, + ATH12K_HTC_MSG_CONNECT_SERVICE_RESP_ID = 3, + ATH12K_HTC_MSG_SETUP_COMPLETE_ID = 4, + ATH12K_HTC_MSG_SETUP_COMPLETE_EX_ID = 5, + ATH12K_HTC_MSG_SEND_SUSPEND_COMPLETE = 6, + ATH12K_HTC_MSG_NACK_SUSPEND = 7, + ATH12K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID = 8, +}; + +enum ath12k_htc_version { + ATH12K_HTC_VERSION_2P0 = 0x00, /* 2.0 */ + ATH12K_HTC_VERSION_2P1 = 0x01, /* 2.1 */ +}; + +enum 
ath12k_htc_conn_flag_threshold_level { + ATH12K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_FOURTH, + ATH12K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_HALF, + ATH12K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_THREE_FOURTHS, + ATH12K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_UNITY, +}; + +#define ATH12K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_MASK GENMASK(1, 0) +#define ATH12K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE BIT(2) +#define ATH12K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL BIT(3) +#define ATH12K_HTC_CONN_FLAGS_RECV_ALLOC GENMASK(15, 8) + +enum ath12k_htc_conn_svc_status { + ATH12K_HTC_CONN_SVC_STATUS_SUCCESS = 0, + ATH12K_HTC_CONN_SVC_STATUS_NOT_FOUND = 1, + ATH12K_HTC_CONN_SVC_STATUS_FAILED = 2, + ATH12K_HTC_CONN_SVC_STATUS_NO_RESOURCES = 3, + ATH12K_HTC_CONN_SVC_STATUS_NO_MORE_EP = 4 +}; + +struct ath12k_htc_ready { + __le32 id_credit_count; + __le32 size_ep; +} __packed; + +struct ath12k_htc_ready_extended { + struct ath12k_htc_ready base; + __le32 ver_bundle; +} __packed; + +struct ath12k_htc_conn_svc { + __le32 msg_svc_id; + __le32 flags_len; +} __packed; + +struct ath12k_htc_conn_svc_resp { + __le32 msg_svc_id; + __le32 flags_len; + __le32 svc_meta_pad; +} __packed; + +struct ath12k_htc_setup_complete_extended { + __le32 msg_id; + __le32 flags; + __le32 max_msgs_per_bundled_recv; +} __packed; + +struct ath12k_htc_msg { + __le32 msg_svc_id; + __le32 flags_len; +} __packed __aligned(4); + +enum ath12k_htc_record_id { + ATH12K_HTC_RECORD_NULL = 0, + ATH12K_HTC_RECORD_CREDITS = 1 +}; + +struct ath12k_htc_record_hdr { + u8 id; /* @enum ath12k_htc_record_id */ + u8 len; + u8 pad0; + u8 pad1; +} __packed; + +struct ath12k_htc_credit_report { + u8 eid; /* @enum ath12k_htc_ep_id */ + u8 credits; + u8 pad0; + u8 pad1; +} __packed; + +struct ath12k_htc_record { + struct ath12k_htc_record_hdr hdr; + struct ath12k_htc_credit_report credit_report[]; +} __packed __aligned(4); + +/* HTC FRAME structure layout draft + * + * note: the trailer offset is dynamic depending + * on payload length. 
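+ * In ath12k_htc_rx_completion_handler() the trailer is located as
+ * (u8 *)hdr + sizeof(*hdr) + payload_len - trailer_len, with
+ * trailer_len taken from HTC_HDR_CONTROLBYTES0 of ctrl_info.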
this is only a struct layout draft + * + *======================================================= + * + * HTC HEADER + * + *======================================================= + * | + * HTC message | payload + * (variable length) | (variable length) + *======================================================= + * + * HTC Record + * + *======================================================= + */ + +enum ath12k_htc_svc_gid { + ATH12K_HTC_SVC_GRP_RSVD = 0, + ATH12K_HTC_SVC_GRP_WMI = 1, + ATH12K_HTC_SVC_GRP_NMI = 2, + ATH12K_HTC_SVC_GRP_HTT = 3, + ATH12K_HTC_SVC_GRP_CFG = 4, + ATH12K_HTC_SVC_GRP_IPA = 5, + ATH12K_HTC_SVC_GRP_PKTLOG = 6, + + ATH12K_HTC_SVC_GRP_TEST = 254, + ATH12K_HTC_SVC_GRP_LAST = 255, +}; + +#define SVC(group, idx) \ + (int)(((int)(group) << 8) | (int)(idx)) + +enum ath12k_htc_svc_id { + /* NOTE: service ID of 0x0000 is reserved and should never be used */ + ATH12K_HTC_SVC_ID_RESERVED = 0x0000, + ATH12K_HTC_SVC_ID_UNUSED = ATH12K_HTC_SVC_ID_RESERVED, + + ATH12K_HTC_SVC_ID_RSVD_CTRL = SVC(ATH12K_HTC_SVC_GRP_RSVD, 1), + ATH12K_HTC_SVC_ID_WMI_CONTROL = SVC(ATH12K_HTC_SVC_GRP_WMI, 0), + ATH12K_HTC_SVC_ID_WMI_DATA_BE = SVC(ATH12K_HTC_SVC_GRP_WMI, 1), + ATH12K_HTC_SVC_ID_WMI_DATA_BK = SVC(ATH12K_HTC_SVC_GRP_WMI, 2), + ATH12K_HTC_SVC_ID_WMI_DATA_VI = SVC(ATH12K_HTC_SVC_GRP_WMI, 3), + ATH12K_HTC_SVC_ID_WMI_DATA_VO = SVC(ATH12K_HTC_SVC_GRP_WMI, 4), + ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1 = SVC(ATH12K_HTC_SVC_GRP_WMI, 5), + ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2 = SVC(ATH12K_HTC_SVC_GRP_WMI, 6), + ATH12K_HTC_SVC_ID_WMI_CONTROL_DIAG = SVC(ATH12K_HTC_SVC_GRP_WMI, 7), + + ATH12K_HTC_SVC_ID_NMI_CONTROL = SVC(ATH12K_HTC_SVC_GRP_NMI, 0), + ATH12K_HTC_SVC_ID_NMI_DATA = SVC(ATH12K_HTC_SVC_GRP_NMI, 1), + + ATH12K_HTC_SVC_ID_HTT_DATA_MSG = SVC(ATH12K_HTC_SVC_GRP_HTT, 0), + + /* raw stream service (i.e. 
flash, tcmd, calibration apps) */ + ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS = SVC(ATH12K_HTC_SVC_GRP_TEST, 0), + ATH12K_HTC_SVC_ID_IPA_TX = SVC(ATH12K_HTC_SVC_GRP_IPA, 0), + ATH12K_HTC_SVC_ID_PKT_LOG = SVC(ATH12K_HTC_SVC_GRP_PKTLOG, 0), +}; + +#undef SVC + +enum ath12k_htc_ep_id { + ATH12K_HTC_EP_UNUSED = -1, + ATH12K_HTC_EP_0 = 0, + ATH12K_HTC_EP_1 = 1, + ATH12K_HTC_EP_2, + ATH12K_HTC_EP_3, + ATH12K_HTC_EP_4, + ATH12K_HTC_EP_5, + ATH12K_HTC_EP_6, + ATH12K_HTC_EP_7, + ATH12K_HTC_EP_8, + ATH12K_HTC_EP_COUNT, +}; + +struct ath12k_htc_ep_ops { + void (*ep_tx_complete)(struct ath12k_base *ab, struct sk_buff *skb); + void (*ep_rx_complete)(struct ath12k_base *ab, struct sk_buff *skb); + void (*ep_tx_credits)(struct ath12k_base *ab); +}; + +/* service connection information */ +struct ath12k_htc_svc_conn_req { + u16 service_id; + struct ath12k_htc_ep_ops ep_ops; + int max_send_queue_depth; +}; + +/* service connection response information */ +struct ath12k_htc_svc_conn_resp { + u8 buffer_len; + u8 actual_len; + enum ath12k_htc_ep_id eid; + unsigned int max_msg_len; + u8 connect_resp_code; +}; + +#define ATH12K_NUM_CONTROL_TX_BUFFERS 2 +#define ATH12K_HTC_MAX_LEN 4096 +#define ATH12K_HTC_MAX_CTRL_MSG_LEN 256 +#define ATH12K_HTC_WAIT_TIMEOUT_HZ (1 * HZ) +#define ATH12K_HTC_CONTROL_BUFFER_SIZE (ATH12K_HTC_MAX_CTRL_MSG_LEN + \ + sizeof(struct ath12k_htc_hdr)) +#define ATH12K_HTC_CONN_SVC_TIMEOUT_HZ (1 * HZ) +#define ATH12K_HTC_MAX_SERVICE_ALLOC_ENTRIES 8 + +struct ath12k_htc_ep { + struct ath12k_htc *htc; + enum ath12k_htc_ep_id eid; + enum ath12k_htc_svc_id service_id; + struct ath12k_htc_ep_ops ep_ops; + + int max_tx_queue_depth; + int max_ep_message_len; + u8 ul_pipe_id; + u8 dl_pipe_id; + + u8 seq_no; /* for debugging */ + int tx_credits; + bool tx_credit_flow_enabled; +}; + +struct ath12k_htc_svc_tx_credits { + u16 service_id; + u8 credit_allocation; +}; + +struct ath12k_htc { + struct ath12k_base *ab; + struct ath12k_htc_ep endpoint[ATH12K_HTC_EP_COUNT]; + + /* protects endpoints */ + spinlock_t tx_lock; + + u8 control_resp_buffer[ATH12K_HTC_MAX_CTRL_MSG_LEN]; + int control_resp_len; + + struct completion ctl_resp; + + int total_transmit_credits; + struct ath12k_htc_svc_tx_credits + service_alloc_table[ATH12K_HTC_MAX_SERVICE_ALLOC_ENTRIES]; + int target_credit_size; + u8 wmi_ep_count; +}; + +int ath12k_htc_init(struct ath12k_base *ar); +int ath12k_htc_wait_target(struct ath12k_htc *htc); +int ath12k_htc_start(struct ath12k_htc *htc); +int ath12k_htc_connect_service(struct ath12k_htc *htc, + struct ath12k_htc_svc_conn_req *conn_req, + struct ath12k_htc_svc_conn_resp *conn_resp); +int ath12k_htc_send(struct ath12k_htc *htc, enum ath12k_htc_ep_id eid, + struct sk_buff *packet); +struct sk_buff *ath12k_htc_alloc_skb(struct ath12k_base *ar, int size); +void ath12k_htc_rx_completion_handler(struct ath12k_base *ar, + struct sk_buff *skb); + +#endif diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c new file mode 100644 index 000000000000..91d576fd4b0f --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/hw.c @@ -0,0 +1,1041 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include <linux/types.h> +#include <linux/bitops.h> +#include <linux/bitfield.h> + +#include "debug.h" +#include "core.h" +#include "ce.h" +#include "hw.h" +#include "mhi.h" +#include "dp_rx.h" + +static u8 ath12k_hw_qcn9274_mac_from_pdev_id(int pdev_idx) +{ + return pdev_idx; +} + +static int ath12k_hw_mac_id_to_pdev_id_qcn9274(const struct ath12k_hw_params *hw, + int mac_id) +{ + return mac_id; +} + +static int ath12k_hw_mac_id_to_srng_id_qcn9274(const struct ath12k_hw_params *hw, + int mac_id) +{ + return 0; +} + +static u8 ath12k_hw_get_ring_selector_qcn9274(struct sk_buff *skb) +{ + return smp_processor_id(); +} + +static bool ath12k_dp_srng_is_comp_ring_qcn9274(int ring_num) +{ + if (ring_num < 3 || ring_num == 4) + return true; + + return false; +} + +static int ath12k_hw_mac_id_to_pdev_id_wcn7850(const struct ath12k_hw_params *hw, + int mac_id) +{ + return 0; +} + +static int ath12k_hw_mac_id_to_srng_id_wcn7850(const struct ath12k_hw_params *hw, + int mac_id) +{ + return mac_id; +} + +static u8 ath12k_hw_get_ring_selector_wcn7850(struct sk_buff *skb) +{ + return skb_get_queue_mapping(skb); +} + +static bool ath12k_dp_srng_is_comp_ring_wcn7850(int ring_num) +{ + if (ring_num == 0 || ring_num == 2 || ring_num == 4) + return true; + + return false; +} + +static const struct ath12k_hw_ops qcn9274_ops = { + .get_hw_mac_from_pdev_id = ath12k_hw_qcn9274_mac_from_pdev_id, + .mac_id_to_pdev_id = ath12k_hw_mac_id_to_pdev_id_qcn9274, + .mac_id_to_srng_id = ath12k_hw_mac_id_to_srng_id_qcn9274, + .rxdma_ring_sel_config = ath12k_dp_rxdma_ring_sel_config_qcn9274, + .get_ring_selector = ath12k_hw_get_ring_selector_qcn9274, + .dp_srng_is_tx_comp_ring = ath12k_dp_srng_is_comp_ring_qcn9274, +}; + +static const struct ath12k_hw_ops wcn7850_ops = { + .get_hw_mac_from_pdev_id = ath12k_hw_qcn9274_mac_from_pdev_id, + .mac_id_to_pdev_id = ath12k_hw_mac_id_to_pdev_id_wcn7850, + .mac_id_to_srng_id = ath12k_hw_mac_id_to_srng_id_wcn7850, + .rxdma_ring_sel_config = ath12k_dp_rxdma_ring_sel_config_wcn7850, + .get_ring_selector = ath12k_hw_get_ring_selector_wcn7850, + .dp_srng_is_tx_comp_ring = ath12k_dp_srng_is_comp_ring_wcn7850, +}; + +#define ATH12K_TX_RING_MASK_0 0x1 +#define ATH12K_TX_RING_MASK_1 0x2 +#define ATH12K_TX_RING_MASK_2 0x4 +#define ATH12K_TX_RING_MASK_3 0x8 +#define ATH12K_TX_RING_MASK_4 0x10 + +#define ATH12K_RX_RING_MASK_0 0x1 +#define ATH12K_RX_RING_MASK_1 0x2 +#define ATH12K_RX_RING_MASK_2 0x4 +#define ATH12K_RX_RING_MASK_3 0x8 + +#define ATH12K_RX_ERR_RING_MASK_0 0x1 + +#define ATH12K_RX_WBM_REL_RING_MASK_0 0x1 + +#define ATH12K_REO_STATUS_RING_MASK_0 0x1 + +#define ATH12K_HOST2RXDMA_RING_MASK_0 0x1 + +#define ATH12K_RX_MON_RING_MASK_0 0x1 +#define ATH12K_RX_MON_RING_MASK_1 0x2 +#define ATH12K_RX_MON_RING_MASK_2 0x4 + +#define ATH12K_TX_MON_RING_MASK_0 0x1 +#define ATH12K_TX_MON_RING_MASK_1 0x2 + +/* Target firmware's Copy Engine configuration. 
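+ * Pipe directions are given from the host's point of view:
+ * PIPEDIR_OUT is host->target and PIPEDIR_IN is target->host,
+ * matching the per-entry comments in the service map further below.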
*/ +static const struct ce_pipe_config ath12k_target_ce_config_wlan_qcn9274[] = { + /* CE0: host->target HTC control and raw streams */ + { + .pipenum = __cpu_to_le32(0), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE1: target->host HTT + HTC control */ + { + .pipenum = __cpu_to_le32(1), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE2: target->host WMI */ + { + .pipenum = __cpu_to_le32(2), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE3: host->target WMI (mac0) */ + { + .pipenum = __cpu_to_le32(3), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE4: host->target HTT */ + { + .pipenum = __cpu_to_le32(4), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(256), + .nbytes_max = __cpu_to_le32(256), + .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR), + .reserved = __cpu_to_le32(0), + }, + + /* CE5: target->host Pktlog */ + { + .pipenum = __cpu_to_le32(5), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE6: Reserved for target autonomous hif_memcpy */ + { + .pipenum = __cpu_to_le32(6), + .pipedir = __cpu_to_le32(PIPEDIR_INOUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(16384), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE7: host->target WMI (mac1) */ + { + .pipenum = __cpu_to_le32(7), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE8: Reserved for target autonomous hif_memcpy */ + { + .pipenum = __cpu_to_le32(8), + .pipedir = __cpu_to_le32(PIPEDIR_INOUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(16384), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE9, 10 and 11: Reserved for MHI */ + + /* CE12: Target CV prefetch */ + { + .pipenum = __cpu_to_le32(12), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE13: Target CV prefetch */ + { + .pipenum = __cpu_to_le32(13), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE14: WMI logging/CFR/Spectral/Radar */ + { + .pipenum = __cpu_to_le32(14), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE15: Reserved */ +}; + +/* Target firmware's Copy Engine configuration. 
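+ * Same idea as the qcn9274 table above, but with a single WMI pipe
+ * pair (wcn7850 is a single-pdev device) and no CV prefetch pipes;
+ * CE7 here is host-to-host only and CE8 is reserved for IPA.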
*/ +static const struct ce_pipe_config ath12k_target_ce_config_wlan_wcn7850[] = { + /* CE0: host->target HTC control and raw streams */ + { + .pipenum = __cpu_to_le32(0), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE1: target->host HTT + HTC control */ + { + .pipenum = __cpu_to_le32(1), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE2: target->host WMI */ + { + .pipenum = __cpu_to_le32(2), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE3: host->target WMI */ + { + .pipenum = __cpu_to_le32(3), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE4: host->target HTT */ + { + .pipenum = __cpu_to_le32(4), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(256), + .nbytes_max = __cpu_to_le32(256), + .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR), + .reserved = __cpu_to_le32(0), + }, + + /* CE5: target->host Pktlog */ + { + .pipenum = __cpu_to_le32(5), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE6: Reserved for target autonomous hif_memcpy */ + { + .pipenum = __cpu_to_le32(6), + .pipedir = __cpu_to_le32(PIPEDIR_INOUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(16384), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + + /* CE7 used only by Host */ + { + .pipenum = __cpu_to_le32(7), + .pipedir = __cpu_to_le32(PIPEDIR_INOUT_H2H), + .nentries = __cpu_to_le32(0), + .nbytes_max = __cpu_to_le32(0), + .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR), + .reserved = __cpu_to_le32(0), + }, + + /* CE8 target->host used only by IPA */ + { + .pipenum = __cpu_to_le32(8), + .pipedir = __cpu_to_le32(PIPEDIR_INOUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(16384), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), + }, + /* CE 9, 10, 11 are used by MHI driver */ +}; + +/* Map from service/endpoint to Copy Engine. + * This table is derived from the CE_PCI TABLE, above. + * It is passed to the Target at startup for use by firmware. 
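+ * For example, the WMI control service below is routed to CE3 for
+ * host->target commands and to CE2 for target->host events.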
+ */ +static const struct service_to_pipe ath12k_target_service_to_ce_map_wlan_qcn9274[] = { + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(0), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(1), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(0), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(1), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(4), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(1), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(7), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_PKT_LOG), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(5), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL_DIAG), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(14), + }, + + /* (Additions here) */ + + { /* must be last */ + __cpu_to_le32(0), + __cpu_to_le32(0), + __cpu_to_le32(0), + }, +}; + +static const struct service_to_pipe ath12k_target_service_to_ce_map_wlan_wcn7850[] = { + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK), + 
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(0), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(4), + }, + { + __cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(1), + }, + + /* (Additions here) */ + + { /* must be last */ + __cpu_to_le32(0), + __cpu_to_le32(0), + __cpu_to_le32(0), + }, +}; + +static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9274 = { + .tx = { + ATH12K_TX_RING_MASK_0, + ATH12K_TX_RING_MASK_1, + ATH12K_TX_RING_MASK_2, + ATH12K_TX_RING_MASK_3, + }, + .rx_mon_dest = { + 0, 0, 0, + ATH12K_RX_MON_RING_MASK_0, + ATH12K_RX_MON_RING_MASK_1, + ATH12K_RX_MON_RING_MASK_2, + }, + .rx = { + 0, 0, 0, 0, + ATH12K_RX_RING_MASK_0, + ATH12K_RX_RING_MASK_1, + ATH12K_RX_RING_MASK_2, + ATH12K_RX_RING_MASK_3, + }, + .rx_err = { + 0, 0, 0, + ATH12K_RX_ERR_RING_MASK_0, + }, + .rx_wbm_rel = { + 0, 0, 0, + ATH12K_RX_WBM_REL_RING_MASK_0, + }, + .reo_status = { + 0, 0, 0, + ATH12K_REO_STATUS_RING_MASK_0, + }, + .host2rxdma = { + 0, 0, 0, + ATH12K_HOST2RXDMA_RING_MASK_0, + }, + .tx_mon_dest = { + ATH12K_TX_MON_RING_MASK_0, + ATH12K_TX_MON_RING_MASK_1, + }, +}; + +static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_wcn7850 = { + .tx = { + ATH12K_TX_RING_MASK_0, + ATH12K_TX_RING_MASK_2, + ATH12K_TX_RING_MASK_4, + }, + .rx_mon_dest = { + }, + .rx = { + 0, 0, 0, + ATH12K_RX_RING_MASK_0, + ATH12K_RX_RING_MASK_1, + ATH12K_RX_RING_MASK_2, + ATH12K_RX_RING_MASK_3, + }, + .rx_err = { + ATH12K_RX_ERR_RING_MASK_0, + }, + .rx_wbm_rel = { + ATH12K_RX_WBM_REL_RING_MASK_0, + }, + .reo_status = { + ATH12K_REO_STATUS_RING_MASK_0, + }, + .host2rxdma = { + }, + .tx_mon_dest = { + }, +}; + +static const struct ath12k_hw_regs qcn9274_v1_regs = { + /* SW2TCL(x) R0 ring configuration address */ + .hal_tcl1_ring_id = 0x00000908, + .hal_tcl1_ring_misc = 0x00000910, + .hal_tcl1_ring_tp_addr_lsb = 0x0000091c, + .hal_tcl1_ring_tp_addr_msb = 0x00000920, + .hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000930, + .hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000934, + .hal_tcl1_ring_msi1_base_lsb = 0x00000948, + .hal_tcl1_ring_msi1_base_msb = 0x0000094c, + .hal_tcl1_ring_msi1_data = 0x00000950, + .hal_tcl_ring_base_lsb = 0x00000b58, + + /* TCL STATUS ring address */ + .hal_tcl_status_ring_base_lsb = 0x00000d38, + + .hal_wbm_idle_ring_base_lsb = 
0x00000d0c, + .hal_wbm_idle_ring_misc_addr = 0x00000d1c, + .hal_wbm_r0_idle_list_cntl_addr = 0x00000210, + .hal_wbm_r0_idle_list_size_addr = 0x00000214, + .hal_wbm_scattered_ring_base_lsb = 0x00000220, + .hal_wbm_scattered_ring_base_msb = 0x00000224, + .hal_wbm_scattered_desc_head_info_ix0 = 0x00000230, + .hal_wbm_scattered_desc_head_info_ix1 = 0x00000234, + .hal_wbm_scattered_desc_tail_info_ix0 = 0x00000240, + .hal_wbm_scattered_desc_tail_info_ix1 = 0x00000244, + .hal_wbm_scattered_desc_ptr_hp_addr = 0x0000024c, + + .hal_wbm_sw_release_ring_base_lsb = 0x0000034c, + .hal_wbm_sw1_release_ring_base_lsb = 0x000003c4, + .hal_wbm0_release_ring_base_lsb = 0x00000dd8, + .hal_wbm1_release_ring_base_lsb = 0x00000e50, + + /* PCIe base address */ + .pcie_qserdes_sysclk_en_sel = 0x01e0c0a8, + .pcie_pcs_osc_dtct_config_base = 0x01e0d45c, + + /* PPE release ring address */ + .hal_ppe_rel_ring_base = 0x0000043c, + + /* REO DEST ring address */ + .hal_reo2_ring_base = 0x0000055c, + .hal_reo1_misc_ctrl_addr = 0x00000b7c, + .hal_reo1_sw_cookie_cfg0 = 0x00000050, + .hal_reo1_sw_cookie_cfg1 = 0x00000054, + .hal_reo1_qdesc_lut_base0 = 0x00000058, + .hal_reo1_qdesc_lut_base1 = 0x0000005c, + .hal_reo1_ring_base_lsb = 0x000004e4, + .hal_reo1_ring_base_msb = 0x000004e8, + .hal_reo1_ring_id = 0x000004ec, + .hal_reo1_ring_misc = 0x000004f4, + .hal_reo1_ring_hp_addr_lsb = 0x000004f8, + .hal_reo1_ring_hp_addr_msb = 0x000004fc, + .hal_reo1_ring_producer_int_setup = 0x00000508, + .hal_reo1_ring_msi1_base_lsb = 0x0000052C, + .hal_reo1_ring_msi1_base_msb = 0x00000530, + .hal_reo1_ring_msi1_data = 0x00000534, + .hal_reo1_aging_thres_ix0 = 0x00000b08, + .hal_reo1_aging_thres_ix1 = 0x00000b0c, + .hal_reo1_aging_thres_ix2 = 0x00000b10, + .hal_reo1_aging_thres_ix3 = 0x00000b14, + + /* REO Exception ring address */ + .hal_reo2_sw0_ring_base = 0x000008a4, + + /* REO Reinject ring address */ + .hal_sw2reo_ring_base = 0x00000304, + .hal_sw2reo1_ring_base = 0x0000037c, + + /* REO cmd ring address */ + .hal_reo_cmd_ring_base = 0x0000028c, + + /* REO status ring address */ + .hal_reo_status_ring_base = 0x00000a84, +}; + +static const struct ath12k_hw_regs qcn9274_v2_regs = { + /* SW2TCL(x) R0 ring configuration address */ + .hal_tcl1_ring_id = 0x00000908, + .hal_tcl1_ring_misc = 0x00000910, + .hal_tcl1_ring_tp_addr_lsb = 0x0000091c, + .hal_tcl1_ring_tp_addr_msb = 0x00000920, + .hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000930, + .hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000934, + .hal_tcl1_ring_msi1_base_lsb = 0x00000948, + .hal_tcl1_ring_msi1_base_msb = 0x0000094c, + .hal_tcl1_ring_msi1_data = 0x00000950, + .hal_tcl_ring_base_lsb = 0x00000b58, + + /* TCL STATUS ring address */ + .hal_tcl_status_ring_base_lsb = 0x00000d38, + + /* WBM idle link ring address */ + .hal_wbm_idle_ring_base_lsb = 0x00000d3c, + .hal_wbm_idle_ring_misc_addr = 0x00000d4c, + .hal_wbm_r0_idle_list_cntl_addr = 0x00000240, + .hal_wbm_r0_idle_list_size_addr = 0x00000244, + .hal_wbm_scattered_ring_base_lsb = 0x00000250, + .hal_wbm_scattered_ring_base_msb = 0x00000254, + .hal_wbm_scattered_desc_head_info_ix0 = 0x00000260, + .hal_wbm_scattered_desc_head_info_ix1 = 0x00000264, + .hal_wbm_scattered_desc_tail_info_ix0 = 0x00000270, + .hal_wbm_scattered_desc_tail_info_ix1 = 0x00000274, + .hal_wbm_scattered_desc_ptr_hp_addr = 0x0000027c, + + /* SW2WBM release ring address */ + .hal_wbm_sw_release_ring_base_lsb = 0x0000037c, + .hal_wbm_sw1_release_ring_base_lsb = 0x000003f4, + + /* WBM2SW release ring address */ + .hal_wbm0_release_ring_base_lsb = 0x00000e08, + 
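+	/* Note: every hal_wbm_* offset on v2 sits 0x30 above its v1
+	 * counterpart (e.g. idle ring base 0x00000d0c -> 0x00000d3c).
+	 */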
.hal_wbm1_release_ring_base_lsb = 0x00000e80, + + /* PCIe base address */ + .pcie_qserdes_sysclk_en_sel = 0x01e0c0a8, + .pcie_pcs_osc_dtct_config_base = 0x01e0d45c, + + /* PPE release ring address */ + .hal_ppe_rel_ring_base = 0x0000046c, + + /* REO DEST ring address */ + .hal_reo2_ring_base = 0x00000578, + .hal_reo1_misc_ctrl_addr = 0x00000b9c, + .hal_reo1_sw_cookie_cfg0 = 0x0000006c, + .hal_reo1_sw_cookie_cfg1 = 0x00000070, + .hal_reo1_qdesc_lut_base0 = 0x00000074, + .hal_reo1_qdesc_lut_base1 = 0x00000078, + .hal_reo1_ring_base_lsb = 0x00000500, + .hal_reo1_ring_base_msb = 0x00000504, + .hal_reo1_ring_id = 0x00000508, + .hal_reo1_ring_misc = 0x00000510, + .hal_reo1_ring_hp_addr_lsb = 0x00000514, + .hal_reo1_ring_hp_addr_msb = 0x00000518, + .hal_reo1_ring_producer_int_setup = 0x00000524, + .hal_reo1_ring_msi1_base_lsb = 0x00000548, + .hal_reo1_ring_msi1_base_msb = 0x0000054C, + .hal_reo1_ring_msi1_data = 0x00000550, + .hal_reo1_aging_thres_ix0 = 0x00000B28, + .hal_reo1_aging_thres_ix1 = 0x00000B2C, + .hal_reo1_aging_thres_ix2 = 0x00000B30, + .hal_reo1_aging_thres_ix3 = 0x00000B34, + + /* REO Exception ring address */ + .hal_reo2_sw0_ring_base = 0x000008c0, + + /* REO Reinject ring address */ + .hal_sw2reo_ring_base = 0x00000320, + .hal_sw2reo1_ring_base = 0x00000398, + + /* REO cmd ring address */ + .hal_reo_cmd_ring_base = 0x000002A8, + + /* REO status ring address */ + .hal_reo_status_ring_base = 0x00000aa0, +}; + +static const struct ath12k_hw_regs wcn7850_regs = { + /* SW2TCL(x) R0 ring configuration address */ + .hal_tcl1_ring_id = 0x00000908, + .hal_tcl1_ring_misc = 0x00000910, + .hal_tcl1_ring_tp_addr_lsb = 0x0000091c, + .hal_tcl1_ring_tp_addr_msb = 0x00000920, + .hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000930, + .hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000934, + .hal_tcl1_ring_msi1_base_lsb = 0x00000948, + .hal_tcl1_ring_msi1_base_msb = 0x0000094c, + .hal_tcl1_ring_msi1_data = 0x00000950, + .hal_tcl_ring_base_lsb = 0x00000b58, + + /* TCL STATUS ring address */ + .hal_tcl_status_ring_base_lsb = 0x00000d38, + + .hal_wbm_idle_ring_base_lsb = 0x00000d3c, + .hal_wbm_idle_ring_misc_addr = 0x00000d4c, + .hal_wbm_r0_idle_list_cntl_addr = 0x00000240, + .hal_wbm_r0_idle_list_size_addr = 0x00000244, + .hal_wbm_scattered_ring_base_lsb = 0x00000250, + .hal_wbm_scattered_ring_base_msb = 0x00000254, + .hal_wbm_scattered_desc_head_info_ix0 = 0x00000260, + .hal_wbm_scattered_desc_head_info_ix1 = 0x00000264, + .hal_wbm_scattered_desc_tail_info_ix0 = 0x00000270, + .hal_wbm_scattered_desc_tail_info_ix1 = 0x00000274, + .hal_wbm_scattered_desc_ptr_hp_addr = 0x00000027c, + + .hal_wbm_sw_release_ring_base_lsb = 0x0000037c, + .hal_wbm_sw1_release_ring_base_lsb = 0x00000284, + .hal_wbm0_release_ring_base_lsb = 0x00000e08, + .hal_wbm1_release_ring_base_lsb = 0x00000e80, + + /* PCIe base address */ + .pcie_qserdes_sysclk_en_sel = 0x01e0e0a8, + .pcie_pcs_osc_dtct_config_base = 0x01e0f45c, + + /* PPE release ring address */ + .hal_ppe_rel_ring_base = 0x0000043c, + + /* REO DEST ring address */ + .hal_reo2_ring_base = 0x0000055c, + .hal_reo1_misc_ctrl_addr = 0x00000b7c, + .hal_reo1_sw_cookie_cfg0 = 0x00000050, + .hal_reo1_sw_cookie_cfg1 = 0x00000054, + .hal_reo1_qdesc_lut_base0 = 0x00000058, + .hal_reo1_qdesc_lut_base1 = 0x0000005c, + .hal_reo1_ring_base_lsb = 0x000004e4, + .hal_reo1_ring_base_msb = 0x000004e8, + .hal_reo1_ring_id = 0x000004ec, + .hal_reo1_ring_misc = 0x000004f4, + .hal_reo1_ring_hp_addr_lsb = 0x000004f8, + .hal_reo1_ring_hp_addr_msb = 0x000004fc, + .hal_reo1_ring_producer_int_setup = 
0x00000508, + .hal_reo1_ring_msi1_base_lsb = 0x0000052C, + .hal_reo1_ring_msi1_base_msb = 0x00000530, + .hal_reo1_ring_msi1_data = 0x00000534, + .hal_reo1_aging_thres_ix0 = 0x00000b08, + .hal_reo1_aging_thres_ix1 = 0x00000b0c, + .hal_reo1_aging_thres_ix2 = 0x00000b10, + .hal_reo1_aging_thres_ix3 = 0x00000b14, + + /* REO Exception ring address */ + .hal_reo2_sw0_ring_base = 0x000008a4, + + /* REO Reinject ring address */ + .hal_sw2reo_ring_base = 0x00000304, + .hal_sw2reo1_ring_base = 0x0000037c, + + /* REO cmd ring address */ + .hal_reo_cmd_ring_base = 0x0000028c, + + /* REO status ring address */ + .hal_reo_status_ring_base = 0x00000a84, +}; + +static const struct ath12k_hw_hal_params ath12k_hw_hal_params_qcn9274 = { + .rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM, + .wbm2sw_cc_enable = HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW0_EN | + HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW1_EN | + HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW2_EN | + HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW3_EN | + HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN, +}; + +static const struct ath12k_hw_hal_params ath12k_hw_hal_params_wcn7850 = { + .rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM, + .wbm2sw_cc_enable = HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW0_EN | + HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW2_EN | + HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW3_EN | + HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN, +}; + +static const struct ath12k_hw_params ath12k_hw_params[] = { + { + .name = "qcn9274 hw1.0", + .hw_rev = ATH12K_HW_QCN9274_HW10, + .fw = { + .dir = "QCN9274/hw1.0", + .board_size = 256 * 1024, + .cal_offset = 128 * 1024, + }, + .max_radios = 1, + .single_pdev_only = false, + .qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9274, + .internal_sleep_clock = false, + + .hw_ops = &qcn9274_ops, + .ring_mask = &ath12k_hw_ring_mask_qcn9274, + .regs = &qcn9274_v1_regs, + + .host_ce_config = ath12k_host_ce_config_qcn9274, + .ce_count = 16, + .target_ce_config = ath12k_target_ce_config_wlan_qcn9274, + .target_ce_count = 12, + .svc_to_ce_map = ath12k_target_service_to_ce_map_wlan_qcn9274, + .svc_to_ce_map_len = 18, + + .hal_params = &ath12k_hw_hal_params_qcn9274, + + .rxdma1_enable = false, + .num_rxmda_per_pdev = 1, + .num_rxdma_dst_ring = 0, + .rx_mac_buf_ring = false, + .vdev_start_delay = false, + + .interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP), + .supports_monitor = false, + + .idle_ps = false, + .download_calib = true, + .supports_suspend = false, + .tcl_ring_retry = true, + .reoq_lut_support = false, + .supports_shadow_regs = false, + + .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9274), + .num_tcl_banks = 48, + .max_tx_ring = 4, + + .mhi_config = &ath12k_mhi_config_qcn9274, + + .wmi_init = ath12k_wmi_init_qcn9274, + + .hal_ops = &hal_qcn9274_ops, + + }, + { + .name = "wcn7850 hw2.0", + .hw_rev = ATH12K_HW_WCN7850_HW20, + + .fw = { + .dir = "WCN7850/hw2.0", + .board_size = 256 * 1024, + .cal_offset = 256 * 1024, + }, + + .max_radios = 1, + .single_pdev_only = true, + .qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_WCN7850, + .internal_sleep_clock = true, + + .hw_ops = &wcn7850_ops, + .ring_mask = &ath12k_hw_ring_mask_wcn7850, + .regs = &wcn7850_regs, + + .host_ce_config = ath12k_host_ce_config_wcn7850, + .ce_count = 9, + .target_ce_config = ath12k_target_ce_config_wlan_wcn7850, + .target_ce_count = 9, + .svc_to_ce_map = ath12k_target_service_to_ce_map_wlan_wcn7850, + .svc_to_ce_map_len = 14, + + .hal_params = &ath12k_hw_hal_params_wcn7850, + + .rxdma1_enable = false, + .num_rxmda_per_pdev = 2, + .num_rxdma_dst_ring = 1, + .rx_mac_buf_ring = true, + .vdev_start_delay 
= true, + + .interface_modes = BIT(NL80211_IFTYPE_STATION), + .supports_monitor = false, + + .idle_ps = false, + .download_calib = false, + .supports_suspend = false, + .tcl_ring_retry = false, + .reoq_lut_support = false, + .supports_shadow_regs = true, + + .hal_desc_sz = sizeof(struct hal_rx_desc_wcn7850), + .num_tcl_banks = 7, + .max_tx_ring = 3, + + .mhi_config = &ath12k_mhi_config_wcn7850, + + .wmi_init = ath12k_wmi_init_wcn7850, + + .hal_ops = &hal_wcn7850_ops, + }, + { + .name = "qcn9274 hw2.0", + .hw_rev = ATH12K_HW_QCN9274_HW20, + .fw = { + .dir = "QCN9274/hw2.0", + .board_size = 256 * 1024, + .cal_offset = 128 * 1024, + }, + .max_radios = 1, + .single_pdev_only = false, + .qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9274, + .internal_sleep_clock = false, + + .hw_ops = &qcn9274_ops, + .ring_mask = &ath12k_hw_ring_mask_qcn9274, + .regs = &qcn9274_v2_regs, + + .host_ce_config = ath12k_host_ce_config_qcn9274, + .ce_count = 16, + .target_ce_config = ath12k_target_ce_config_wlan_qcn9274, + .target_ce_count = 12, + .svc_to_ce_map = ath12k_target_service_to_ce_map_wlan_qcn9274, + .svc_to_ce_map_len = 18, + + .hal_params = &ath12k_hw_hal_params_qcn9274, + + .rxdma1_enable = false, + .num_rxmda_per_pdev = 1, + .num_rxdma_dst_ring = 0, + .rx_mac_buf_ring = false, + .vdev_start_delay = false, + + .interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP), + .supports_monitor = false, + + .idle_ps = false, + .download_calib = true, + .supports_suspend = false, + .tcl_ring_retry = true, + .reoq_lut_support = false, + .supports_shadow_regs = false, + + .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9274), + .num_tcl_banks = 48, + .max_tx_ring = 4, + + .mhi_config = &ath12k_mhi_config_qcn9274, + + .wmi_init = ath12k_wmi_init_qcn9274, + + .hal_ops = &hal_qcn9274_ops, + }, +}; + +int ath12k_hw_init(struct ath12k_base *ab) +{ + const struct ath12k_hw_params *hw_params = NULL; + int i; + + for (i = 0; i < ARRAY_SIZE(ath12k_hw_params); i++) { + hw_params = &ath12k_hw_params[i]; + + if (hw_params->hw_rev == ab->hw_rev) + break; + } + + if (i == ARRAY_SIZE(ath12k_hw_params)) { + ath12k_err(ab, "Unsupported hardware version: 0x%x\n", ab->hw_rev); + return -EINVAL; + } + + ab->hw_params = hw_params; + + ath12k_info(ab, "Hardware name: %s\n", ab->hw_params->name); + + return 0; +} diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h new file mode 100644 index 000000000000..e3461004188b --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/hw.h @@ -0,0 +1,312 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef ATH12K_HW_H +#define ATH12K_HW_H + +#include <linux/mhi.h> + +#include "wmi.h" +#include "hal.h" + +/* Target configuration defines */ + +/* Num VDEVS per radio */ +#define TARGET_NUM_VDEVS (16 + 1) + +#define TARGET_NUM_PEERS_PDEV (512 + TARGET_NUM_VDEVS) + +/* Num of peers for Single Radio mode */ +#define TARGET_NUM_PEERS_SINGLE (TARGET_NUM_PEERS_PDEV) + +/* Num of peers for DBS */ +#define TARGET_NUM_PEERS_DBS (2 * TARGET_NUM_PEERS_PDEV) + +/* Num of peers for DBS_SBS */ +#define TARGET_NUM_PEERS_DBS_SBS (3 * TARGET_NUM_PEERS_PDEV) + +/* Max num of stations (per radio) */ +#define TARGET_NUM_STATIONS 512 + +#define TARGET_NUM_PEERS(x) TARGET_NUM_PEERS_##x +#define TARGET_NUM_PEER_KEYS 2 +#define TARGET_NUM_TIDS(x) (2 * TARGET_NUM_PEERS(x) + \ + 4 * TARGET_NUM_VDEVS + 8) + +#define TARGET_AST_SKID_LIMIT 16 +#define TARGET_NUM_OFFLD_PEERS 4 +#define TARGET_NUM_OFFLD_REORDER_BUFFS 4 + +#define TARGET_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(4)) +#define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(4)) +#define TARGET_RX_TIMEOUT_LO_PRI 100 +#define TARGET_RX_TIMEOUT_HI_PRI 40 + +#define TARGET_DECAP_MODE_RAW 0 +#define TARGET_DECAP_MODE_NATIVE_WIFI 1 +#define TARGET_DECAP_MODE_ETH 2 + +#define TARGET_SCAN_MAX_PENDING_REQS 4 +#define TARGET_BMISS_OFFLOAD_MAX_VDEV 3 +#define TARGET_ROAM_OFFLOAD_MAX_VDEV 3 +#define TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES 8 +#define TARGET_GTK_OFFLOAD_MAX_VDEV 3 +#define TARGET_NUM_MCAST_GROUPS 12 +#define TARGET_NUM_MCAST_TABLE_ELEMS 64 +#define TARGET_MCAST2UCAST_MODE 2 +#define TARGET_TX_DBG_LOG_SIZE 1024 +#define TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1 +#define TARGET_VOW_CONFIG 0 +#define TARGET_NUM_MSDU_DESC (2500) +#define TARGET_MAX_FRAG_ENTRIES 6 +#define TARGET_MAX_BCN_OFFLD 16 +#define TARGET_NUM_WDS_ENTRIES 32 +#define TARGET_DMA_BURST_SIZE 1 +#define TARGET_RX_BATCHMODE 1 + +#define ATH12K_HW_MAX_QUEUES 4 +#define ATH12K_QUEUE_LEN 4096 + +#define ATH12K_HW_RATECODE_CCK_SHORT_PREAM_MASK 0x4 + +#define ATH12K_FW_DIR "ath12k" + +#define ATH12K_BOARD_MAGIC "QCA-ATH12K-BOARD" +#define ATH12K_BOARD_API2_FILE "board-2.bin" +#define ATH12K_DEFAULT_BOARD_FILE "board.bin" +#define ATH12K_DEFAULT_CAL_FILE "caldata.bin" +#define ATH12K_AMSS_FILE "amss.bin" +#define ATH12K_M3_FILE "m3.bin" +#define ATH12K_REGDB_FILE_NAME "regdb.bin" + +enum ath12k_hw_rate_cck { + ATH12K_HW_RATE_CCK_LP_11M = 0, + ATH12K_HW_RATE_CCK_LP_5_5M, + ATH12K_HW_RATE_CCK_LP_2M, + ATH12K_HW_RATE_CCK_LP_1M, + ATH12K_HW_RATE_CCK_SP_11M, + ATH12K_HW_RATE_CCK_SP_5_5M, + ATH12K_HW_RATE_CCK_SP_2M, +}; + +enum ath12k_hw_rate_ofdm { + ATH12K_HW_RATE_OFDM_48M = 0, + ATH12K_HW_RATE_OFDM_24M, + ATH12K_HW_RATE_OFDM_12M, + ATH12K_HW_RATE_OFDM_6M, + ATH12K_HW_RATE_OFDM_54M, + ATH12K_HW_RATE_OFDM_36M, + ATH12K_HW_RATE_OFDM_18M, + ATH12K_HW_RATE_OFDM_9M, +}; + +enum ath12k_bus { + ATH12K_BUS_PCI, +}; + +#define ATH12K_EXT_IRQ_GRP_NUM_MAX 11 + +struct hal_rx_desc; +struct hal_tcl_data_cmd; +struct htt_rx_ring_tlv_filter; +enum hal_encrypt_type; + +struct ath12k_hw_ring_mask { + u8 tx[ATH12K_EXT_IRQ_GRP_NUM_MAX]; + u8 rx_mon_dest[ATH12K_EXT_IRQ_GRP_NUM_MAX]; + u8 rx[ATH12K_EXT_IRQ_GRP_NUM_MAX]; + u8 rx_err[ATH12K_EXT_IRQ_GRP_NUM_MAX]; + u8 rx_wbm_rel[ATH12K_EXT_IRQ_GRP_NUM_MAX]; + u8 reo_status[ATH12K_EXT_IRQ_GRP_NUM_MAX]; + u8 host2rxdma[ATH12K_EXT_IRQ_GRP_NUM_MAX]; + u8 tx_mon_dest[ATH12K_EXT_IRQ_GRP_NUM_MAX]; +}; + +struct ath12k_hw_hal_params { + enum hal_rx_buf_return_buf_manager rx_buf_rbm; + u32 wbm2sw_cc_enable; +}; + +struct ath12k_hw_params { + const char 
*name; + u16 hw_rev; + + struct { + const char *dir; + size_t board_size; + size_t cal_offset; + } fw; + + u8 max_radios; + bool single_pdev_only:1; + u32 qmi_service_ins_id; + bool internal_sleep_clock:1; + + const struct ath12k_hw_ops *hw_ops; + const struct ath12k_hw_ring_mask *ring_mask; + const struct ath12k_hw_regs *regs; + + const struct ce_attr *host_ce_config; + u32 ce_count; + const struct ce_pipe_config *target_ce_config; + u32 target_ce_count; + const struct service_to_pipe *svc_to_ce_map; + u32 svc_to_ce_map_len; + + const struct ath12k_hw_hal_params *hal_params; + + bool rxdma1_enable:1; + int num_rxmda_per_pdev; + int num_rxdma_dst_ring; + bool rx_mac_buf_ring:1; + bool vdev_start_delay:1; + + u16 interface_modes; + bool supports_monitor:1; + + bool idle_ps:1; + bool download_calib:1; + bool supports_suspend:1; + bool tcl_ring_retry:1; + bool reoq_lut_support:1; + bool supports_shadow_regs:1; + + u32 hal_desc_sz; + u32 num_tcl_banks; + u32 max_tx_ring; + + const struct mhi_controller_config *mhi_config; + + void (*wmi_init)(struct ath12k_base *ab, + struct ath12k_wmi_resource_config_arg *config); + + const struct hal_ops *hal_ops; +}; + +struct ath12k_hw_ops { + u8 (*get_hw_mac_from_pdev_id)(int pdev_id); + int (*mac_id_to_pdev_id)(const struct ath12k_hw_params *hw, int mac_id); + int (*mac_id_to_srng_id)(const struct ath12k_hw_params *hw, int mac_id); + int (*rxdma_ring_sel_config)(struct ath12k_base *ab); + u8 (*get_ring_selector)(struct sk_buff *skb); + bool (*dp_srng_is_tx_comp_ring)(int ring_num); +}; + +static inline +int ath12k_hw_get_mac_from_pdev_id(const struct ath12k_hw_params *hw, + int pdev_idx) +{ + if (hw->hw_ops->get_hw_mac_from_pdev_id) + return hw->hw_ops->get_hw_mac_from_pdev_id(pdev_idx); + + return 0; +} + +static inline int ath12k_hw_mac_id_to_pdev_id(const struct ath12k_hw_params *hw, + int mac_id) +{ + if (hw->hw_ops->mac_id_to_pdev_id) + return hw->hw_ops->mac_id_to_pdev_id(hw, mac_id); + + return 0; +} + +static inline int ath12k_hw_mac_id_to_srng_id(const struct ath12k_hw_params *hw, + int mac_id) +{ + if (hw->hw_ops->mac_id_to_srng_id) + return hw->hw_ops->mac_id_to_srng_id(hw, mac_id); + + return 0; +} + +struct ath12k_fw_ie { + __le32 id; + __le32 len; + u8 data[]; +}; + +enum ath12k_bd_ie_board_type { + ATH12K_BD_IE_BOARD_NAME = 0, + ATH12K_BD_IE_BOARD_DATA = 1, +}; + +enum ath12k_bd_ie_type { + /* contains sub IEs of enum ath12k_bd_ie_board_type */ + ATH12K_BD_IE_BOARD = 0, + ATH12K_BD_IE_BOARD_EXT = 1, +}; + +struct ath12k_hw_regs { + u32 hal_tcl1_ring_id; + u32 hal_tcl1_ring_misc; + u32 hal_tcl1_ring_tp_addr_lsb; + u32 hal_tcl1_ring_tp_addr_msb; + u32 hal_tcl1_ring_consumer_int_setup_ix0; + u32 hal_tcl1_ring_consumer_int_setup_ix1; + u32 hal_tcl1_ring_msi1_base_lsb; + u32 hal_tcl1_ring_msi1_base_msb; + u32 hal_tcl1_ring_msi1_data; + u32 hal_tcl_ring_base_lsb; + + u32 hal_tcl_status_ring_base_lsb; + + u32 hal_wbm_idle_ring_base_lsb; + u32 hal_wbm_idle_ring_misc_addr; + u32 hal_wbm_r0_idle_list_cntl_addr; + u32 hal_wbm_r0_idle_list_size_addr; + u32 hal_wbm_scattered_ring_base_lsb; + u32 hal_wbm_scattered_ring_base_msb; + u32 hal_wbm_scattered_desc_head_info_ix0; + u32 hal_wbm_scattered_desc_head_info_ix1; + u32 hal_wbm_scattered_desc_tail_info_ix0; + u32 hal_wbm_scattered_desc_tail_info_ix1; + u32 hal_wbm_scattered_desc_ptr_hp_addr; + + u32 hal_wbm_sw_release_ring_base_lsb; + u32 hal_wbm_sw1_release_ring_base_lsb; + u32 hal_wbm0_release_ring_base_lsb; + u32 hal_wbm1_release_ring_base_lsb; + + u32 pcie_qserdes_sysclk_en_sel; + u32 
pcie_pcs_osc_dtct_config_base; + + u32 hal_ppe_rel_ring_base; + + u32 hal_reo2_ring_base; + u32 hal_reo1_misc_ctrl_addr; + u32 hal_reo1_sw_cookie_cfg0; + u32 hal_reo1_sw_cookie_cfg1; + u32 hal_reo1_qdesc_lut_base0; + u32 hal_reo1_qdesc_lut_base1; + u32 hal_reo1_ring_base_lsb; + u32 hal_reo1_ring_base_msb; + u32 hal_reo1_ring_id; + u32 hal_reo1_ring_misc; + u32 hal_reo1_ring_hp_addr_lsb; + u32 hal_reo1_ring_hp_addr_msb; + u32 hal_reo1_ring_producer_int_setup; + u32 hal_reo1_ring_msi1_base_lsb; + u32 hal_reo1_ring_msi1_base_msb; + u32 hal_reo1_ring_msi1_data; + u32 hal_reo1_aging_thres_ix0; + u32 hal_reo1_aging_thres_ix1; + u32 hal_reo1_aging_thres_ix2; + u32 hal_reo1_aging_thres_ix3; + + u32 hal_reo2_sw0_ring_base; + + u32 hal_sw2reo_ring_base; + u32 hal_sw2reo1_ring_base; + + u32 hal_reo_cmd_ring_base; + + u32 hal_reo_status_ring_base; +}; + +int ath12k_hw_init(struct ath12k_base *ab); + +#endif diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c new file mode 100644 index 000000000000..bf7e5b6977b2 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/mac.c @@ -0,0 +1,7038 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include <net/mac80211.h> +#include <linux/etherdevice.h> +#include "mac.h" +#include "core.h" +#include "debug.h" +#include "wmi.h" +#include "hw.h" +#include "dp_tx.h" +#include "dp_rx.h" +#include "peer.h" + +#define CHAN2G(_channel, _freq, _flags) { \ + .band = NL80211_BAND_2GHZ, \ + .hw_value = (_channel), \ + .center_freq = (_freq), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + +#define CHAN5G(_channel, _freq, _flags) { \ + .band = NL80211_BAND_5GHZ, \ + .hw_value = (_channel), \ + .center_freq = (_freq), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + +#define CHAN6G(_channel, _freq, _flags) { \ + .band = NL80211_BAND_6GHZ, \ + .hw_value = (_channel), \ + .center_freq = (_freq), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + +static const struct ieee80211_channel ath12k_2ghz_channels[] = { + CHAN2G(1, 2412, 0), + CHAN2G(2, 2417, 0), + CHAN2G(3, 2422, 0), + CHAN2G(4, 2427, 0), + CHAN2G(5, 2432, 0), + CHAN2G(6, 2437, 0), + CHAN2G(7, 2442, 0), + CHAN2G(8, 2447, 0), + CHAN2G(9, 2452, 0), + CHAN2G(10, 2457, 0), + CHAN2G(11, 2462, 0), + CHAN2G(12, 2467, 0), + CHAN2G(13, 2472, 0), + CHAN2G(14, 2484, 0), +}; + +static const struct ieee80211_channel ath12k_5ghz_channels[] = { + CHAN5G(36, 5180, 0), + CHAN5G(40, 5200, 0), + CHAN5G(44, 5220, 0), + CHAN5G(48, 5240, 0), + CHAN5G(52, 5260, 0), + CHAN5G(56, 5280, 0), + CHAN5G(60, 5300, 0), + CHAN5G(64, 5320, 0), + CHAN5G(100, 5500, 0), + CHAN5G(104, 5520, 0), + CHAN5G(108, 5540, 0), + CHAN5G(112, 5560, 0), + CHAN5G(116, 5580, 0), + CHAN5G(120, 5600, 0), + CHAN5G(124, 5620, 0), + CHAN5G(128, 5640, 0), + CHAN5G(132, 5660, 0), + CHAN5G(136, 5680, 0), + CHAN5G(140, 5700, 0), + CHAN5G(144, 5720, 0), + CHAN5G(149, 5745, 0), + CHAN5G(153, 5765, 0), + CHAN5G(157, 5785, 0), + CHAN5G(161, 5805, 0), + CHAN5G(165, 5825, 0), + CHAN5G(169, 5845, 0), + CHAN5G(173, 5865, 0), +}; + +static const struct ieee80211_channel ath12k_6ghz_channels[] = { + CHAN6G(1, 5955, 0), + CHAN6G(5, 5975, 0), + CHAN6G(9, 5995, 0), + CHAN6G(13, 6015, 0), + CHAN6G(17, 6035, 0), + CHAN6G(21, 6055, 0), + CHAN6G(25, 6075, 0), + CHAN6G(29, 6095, 0), + CHAN6G(33, 6115, 0), + 
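/* The 20 MHz channel numbering above is linear in frequency: the 6 GHz + * entries follow center_freq = 5950 + 5 * hw_value MHz (channel 1 -> + * 5955, channel 233 -> 7115), and the 5 GHz table follows + * center_freq = 5000 + 5 * hw_value MHz (channel 36 -> 5180). + */ +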
CHAN6G(37, 6135, 0), + CHAN6G(41, 6155, 0), + CHAN6G(45, 6175, 0), + CHAN6G(49, 6195, 0), + CHAN6G(53, 6215, 0), + CHAN6G(57, 6235, 0), + CHAN6G(61, 6255, 0), + CHAN6G(65, 6275, 0), + CHAN6G(69, 6295, 0), + CHAN6G(73, 6315, 0), + CHAN6G(77, 6335, 0), + CHAN6G(81, 6355, 0), + CHAN6G(85, 6375, 0), + CHAN6G(89, 6395, 0), + CHAN6G(93, 6415, 0), + CHAN6G(97, 6435, 0), + CHAN6G(101, 6455, 0), + CHAN6G(105, 6475, 0), + CHAN6G(109, 6495, 0), + CHAN6G(113, 6515, 0), + CHAN6G(117, 6535, 0), + CHAN6G(121, 6555, 0), + CHAN6G(125, 6575, 0), + CHAN6G(129, 6595, 0), + CHAN6G(133, 6615, 0), + CHAN6G(137, 6635, 0), + CHAN6G(141, 6655, 0), + CHAN6G(145, 6675, 0), + CHAN6G(149, 6695, 0), + CHAN6G(153, 6715, 0), + CHAN6G(157, 6735, 0), + CHAN6G(161, 6755, 0), + CHAN6G(165, 6775, 0), + CHAN6G(169, 6795, 0), + CHAN6G(173, 6815, 0), + CHAN6G(177, 6835, 0), + CHAN6G(181, 6855, 0), + CHAN6G(185, 6875, 0), + CHAN6G(189, 6895, 0), + CHAN6G(193, 6915, 0), + CHAN6G(197, 6935, 0), + CHAN6G(201, 6955, 0), + CHAN6G(205, 6975, 0), + CHAN6G(209, 6995, 0), + CHAN6G(213, 7015, 0), + CHAN6G(217, 7035, 0), + CHAN6G(221, 7055, 0), + CHAN6G(225, 7075, 0), + CHAN6G(229, 7095, 0), + CHAN6G(233, 7115, 0), +}; + +static struct ieee80211_rate ath12k_legacy_rates[] = { + { .bitrate = 10, + .hw_value = ATH12K_HW_RATE_CCK_LP_1M }, + { .bitrate = 20, + .hw_value = ATH12K_HW_RATE_CCK_LP_2M, + .hw_value_short = ATH12K_HW_RATE_CCK_SP_2M, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 55, + .hw_value = ATH12K_HW_RATE_CCK_LP_5_5M, + .hw_value_short = ATH12K_HW_RATE_CCK_SP_5_5M, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 110, + .hw_value = ATH12K_HW_RATE_CCK_LP_11M, + .hw_value_short = ATH12K_HW_RATE_CCK_SP_11M, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + + { .bitrate = 60, .hw_value = ATH12K_HW_RATE_OFDM_6M }, + { .bitrate = 90, .hw_value = ATH12K_HW_RATE_OFDM_9M }, + { .bitrate = 120, .hw_value = ATH12K_HW_RATE_OFDM_12M }, + { .bitrate = 180, .hw_value = ATH12K_HW_RATE_OFDM_18M }, + { .bitrate = 240, .hw_value = ATH12K_HW_RATE_OFDM_24M }, + { .bitrate = 360, .hw_value = ATH12K_HW_RATE_OFDM_36M }, + { .bitrate = 480, .hw_value = ATH12K_HW_RATE_OFDM_48M }, + { .bitrate = 540, .hw_value = ATH12K_HW_RATE_OFDM_54M }, +}; + +static const int +ath12k_phymodes[NUM_NL80211_BANDS][ATH12K_CHAN_WIDTH_NUM] = { + [NL80211_BAND_2GHZ] = { + [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN, + [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN, + [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20_2G, + [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20_2G, + [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40_2G, + [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80_2G, + [NL80211_CHAN_WIDTH_80P80] = MODE_UNKNOWN, + [NL80211_CHAN_WIDTH_160] = MODE_UNKNOWN, + }, + [NL80211_BAND_5GHZ] = { + [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN, + [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN, + [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20, + [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20, + [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40, + [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80, + [NL80211_CHAN_WIDTH_160] = MODE_11AX_HE160, + [NL80211_CHAN_WIDTH_80P80] = MODE_11AX_HE80_80, + }, + [NL80211_BAND_6GHZ] = { + [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN, + [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN, + [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20, + [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20, + [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40, + [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80, + [NL80211_CHAN_WIDTH_160] = MODE_11AX_HE160, + [NL80211_CHAN_WIDTH_80P80] = MODE_11AX_HE80_80, + }, + +}; + +const struct htt_rx_ring_tlv_filter 
ath12k_mac_mon_status_filter_default = { + .rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START | + HTT_RX_FILTER_TLV_FLAGS_PPDU_END | + HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE, + .pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0, + .pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1, + .pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2, + .pkt_filter_flags3 = HTT_RX_FP_DATA_FILTER_FLASG3 | + HTT_RX_FP_CTRL_FILTER_FLASG3 +}; + +#define ATH12K_MAC_FIRST_OFDM_RATE_IDX 4 +#define ath12k_g_rates ath12k_legacy_rates +#define ath12k_g_rates_size (ARRAY_SIZE(ath12k_legacy_rates)) +#define ath12k_a_rates (ath12k_legacy_rates + 4) +#define ath12k_a_rates_size (ARRAY_SIZE(ath12k_legacy_rates) - 4) + +#define ATH12K_MAC_SCAN_TIMEOUT_MSECS 200 /* in msecs */ + +static const u32 ath12k_smps_map[] = { + [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC, + [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC, + [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE, + [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE, +}; + +static int ath12k_start_vdev_delay(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); + +static const char *ath12k_mac_phymode_str(enum wmi_phy_mode mode) +{ + switch (mode) { + case MODE_11A: + return "11a"; + case MODE_11G: + return "11g"; + case MODE_11B: + return "11b"; + case MODE_11GONLY: + return "11gonly"; + case MODE_11NA_HT20: + return "11na-ht20"; + case MODE_11NG_HT20: + return "11ng-ht20"; + case MODE_11NA_HT40: + return "11na-ht40"; + case MODE_11NG_HT40: + return "11ng-ht40"; + case MODE_11AC_VHT20: + return "11ac-vht20"; + case MODE_11AC_VHT40: + return "11ac-vht40"; + case MODE_11AC_VHT80: + return "11ac-vht80"; + case MODE_11AC_VHT160: + return "11ac-vht160"; + case MODE_11AC_VHT80_80: + return "11ac-vht80+80"; + case MODE_11AC_VHT20_2G: + return "11ac-vht20-2g"; + case MODE_11AC_VHT40_2G: + return "11ac-vht40-2g"; + case MODE_11AC_VHT80_2G: + return "11ac-vht80-2g"; + case MODE_11AX_HE20: + return "11ax-he20"; + case MODE_11AX_HE40: + return "11ax-he40"; + case MODE_11AX_HE80: + return "11ax-he80"; + case MODE_11AX_HE80_80: + return "11ax-he80+80"; + case MODE_11AX_HE160: + return "11ax-he160"; + case MODE_11AX_HE20_2G: + return "11ax-he20-2g"; + case MODE_11AX_HE40_2G: + return "11ax-he40-2g"; + case MODE_11AX_HE80_2G: + return "11ax-he80-2g"; + case MODE_UNKNOWN: + /* skip */ + break; + + /* no default handler to allow compiler to check that the + * enum is fully handled + */ + } + + return "<unknown>"; +} + +enum rate_info_bw +ath12k_mac_bw_to_mac80211_bw(enum ath12k_supported_bw bw) +{ + u8 ret = RATE_INFO_BW_20; + + switch (bw) { + case ATH12K_BW_20: + ret = RATE_INFO_BW_20; + break; + case ATH12K_BW_40: + ret = RATE_INFO_BW_40; + break; + case ATH12K_BW_80: + ret = RATE_INFO_BW_80; + break; + case ATH12K_BW_160: + ret = RATE_INFO_BW_160; + break; + } + + return ret; +} + +enum ath12k_supported_bw ath12k_mac_mac80211_bw_to_ath12k_bw(enum rate_info_bw bw) +{ + switch (bw) { + case RATE_INFO_BW_20: + return ATH12K_BW_20; + case RATE_INFO_BW_40: + return ATH12K_BW_40; + case RATE_INFO_BW_80: + return ATH12K_BW_80; + case RATE_INFO_BW_160: + return ATH12K_BW_160; + default: + return ATH12K_BW_20; + } +} + +int ath12k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx, + u16 *rate) +{ + /* As default, it is OFDM rates */ + int i = ATH12K_MAC_FIRST_OFDM_RATE_IDX; + int max_rates_idx = ath12k_g_rates_size; + + if (preamble == WMI_RATE_PREAMBLE_CCK) { + hw_rc &= ~ATH12K_HW_RATECODE_CCK_SHORT_PREAM_MASK; + i = 0; + max_rates_idx = 
ATH12K_MAC_FIRST_OFDM_RATE_IDX; + } + + while (i < max_rates_idx) { + if (hw_rc == ath12k_legacy_rates[i].hw_value) { + *rateidx = i; + *rate = ath12k_legacy_rates[i].bitrate; + return 0; + } + i++; + } + + return -EINVAL; +} + +u8 ath12k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband, + u32 bitrate) +{ + int i; + + for (i = 0; i < sband->n_bitrates; i++) + if (sband->bitrates[i].bitrate == bitrate) + return i; + + return 0; +} + +static u32 +ath12k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN]) +{ + int nss; + + for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--) + if (ht_mcs_mask[nss]) + return nss + 1; + + return 1; +} + +static u32 +ath12k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX]) +{ + int nss; + + for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--) + if (vht_mcs_mask[nss]) + return nss + 1; + + return 1; +} + +static u8 ath12k_parse_mpdudensity(u8 mpdudensity) +{ +/* From IEEE Std 802.11-2020 defined values for "Minimum MPDU Start Spacing": + * 0 for no restriction + * 1 for 1/4 us + * 2 for 1/2 us + * 3 for 1 us + * 4 for 2 us + * 5 for 4 us + * 6 for 8 us + * 7 for 16 us + */ + switch (mpdudensity) { + case 0: + return 0; + case 1: + case 2: + case 3: + /* Our lower layer calculations limit our precision to + * 1 microsecond + */ + return 1; + case 4: + return 2; + case 5: + return 4; + case 6: + return 8; + case 7: + return 16; + default: + return 0; + } +} + +static int ath12k_mac_vif_chan(struct ieee80211_vif *vif, + struct cfg80211_chan_def *def) +{ + struct ieee80211_chanctx_conf *conf; + + rcu_read_lock(); + conf = rcu_dereference(vif->bss_conf.chanctx_conf); + if (!conf) { + rcu_read_unlock(); + return -ENOENT; + } + + *def = conf->def; + rcu_read_unlock(); + + return 0; +} + +static bool ath12k_mac_bitrate_is_cck(int bitrate) +{ + switch (bitrate) { + case 10: + case 20: + case 55: + case 110: + return true; + } + + return false; +} + +u8 ath12k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband, + u8 hw_rate, bool cck) +{ + const struct ieee80211_rate *rate; + int i; + + for (i = 0; i < sband->n_bitrates; i++) { + rate = &sband->bitrates[i]; + + if (ath12k_mac_bitrate_is_cck(rate->bitrate) != cck) + continue; + + if (rate->hw_value == hw_rate) + return i; + else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE && + rate->hw_value_short == hw_rate) + return i; + } + + return 0; +} + +static u8 ath12k_mac_bitrate_to_rate(int bitrate) +{ + return DIV_ROUND_UP(bitrate, 5) | + (ath12k_mac_bitrate_is_cck(bitrate) ? 
BIT(7) : 0); +} + +static void ath12k_get_arvif_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct ath12k_vif_iter *arvif_iter = data; + struct ath12k_vif *arvif = (void *)vif->drv_priv; + + if (arvif->vdev_id == arvif_iter->vdev_id) + arvif_iter->arvif = arvif; +} + +struct ath12k_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id) +{ + struct ath12k_vif_iter arvif_iter = {}; + u32 flags; + + arvif_iter.vdev_id = vdev_id; + + flags = IEEE80211_IFACE_ITER_RESUME_ALL; + ieee80211_iterate_active_interfaces_atomic(ar->hw, + flags, + ath12k_get_arvif_iter, + &arvif_iter); + if (!arvif_iter.arvif) { + ath12k_warn(ar->ab, "No VIF found for vdev %d\n", vdev_id); + return NULL; + } + + return arvif_iter.arvif; +} + +struct ath12k_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab, + u32 vdev_id) +{ + int i; + struct ath12k_pdev *pdev; + struct ath12k_vif *arvif; + + for (i = 0; i < ab->num_radios; i++) { + pdev = rcu_dereference(ab->pdevs_active[i]); + if (pdev && pdev->ar) { + arvif = ath12k_mac_get_arvif(pdev->ar, vdev_id); + if (arvif) + return arvif; + } + } + + return NULL; +} + +struct ath12k *ath12k_mac_get_ar_by_vdev_id(struct ath12k_base *ab, u32 vdev_id) +{ + int i; + struct ath12k_pdev *pdev; + + for (i = 0; i < ab->num_radios; i++) { + pdev = rcu_dereference(ab->pdevs_active[i]); + if (pdev && pdev->ar) { + if (pdev->ar->allocated_vdev_map & (1LL << vdev_id)) + return pdev->ar; + } + } + + return NULL; +} + +struct ath12k *ath12k_mac_get_ar_by_pdev_id(struct ath12k_base *ab, u32 pdev_id) +{ + int i; + struct ath12k_pdev *pdev; + + if (ab->hw_params->single_pdev_only) { + pdev = rcu_dereference(ab->pdevs_active[0]); + return pdev ? pdev->ar : NULL; + } + + if (WARN_ON(pdev_id > ab->num_radios)) + return NULL; + + for (i = 0; i < ab->num_radios; i++) { + pdev = rcu_dereference(ab->pdevs_active[i]); + + if (pdev && pdev->pdev_id == pdev_id) + return (pdev->ar ? pdev->ar : NULL); + } + + return NULL; +} + +static void ath12k_pdev_caps_update(struct ath12k *ar) +{ + struct ath12k_base *ab = ar->ab; + + ar->max_tx_power = ab->target_caps.hw_max_tx_power; + + /* FIXME: Set min_tx_power to ab->target_caps.hw_min_tx_power. 
+ * But since the received value in svcrdy is same as hw_max_tx_power, + * we can set ar->min_tx_power to 0 currently until + * this is fixed in firmware + */ + ar->min_tx_power = 0; + + ar->txpower_limit_2g = ar->max_tx_power; + ar->txpower_limit_5g = ar->max_tx_power; + ar->txpower_scale = WMI_HOST_TP_SCALE_MAX; +} + +static int ath12k_mac_txpower_recalc(struct ath12k *ar) +{ + struct ath12k_pdev *pdev = ar->pdev; + struct ath12k_vif *arvif; + int ret, txpower = -1; + u32 param; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) { + if (arvif->txpower <= 0) + continue; + + if (txpower == -1) + txpower = arvif->txpower; + else + txpower = min(txpower, arvif->txpower); + } + + if (txpower == -1) + return 0; + + /* txpwr is set as 2 units per dBm in FW*/ + txpower = min_t(u32, max_t(u32, ar->min_tx_power, txpower), + ar->max_tx_power) * 2; + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "txpower to set in hw %d\n", + txpower / 2); + + if ((pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) && + ar->txpower_limit_2g != txpower) { + param = WMI_PDEV_PARAM_TXPOWER_LIMIT2G; + ret = ath12k_wmi_pdev_set_param(ar, param, + txpower, ar->pdev->pdev_id); + if (ret) + goto fail; + ar->txpower_limit_2g = txpower; + } + + if ((pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) && + ar->txpower_limit_5g != txpower) { + param = WMI_PDEV_PARAM_TXPOWER_LIMIT5G; + ret = ath12k_wmi_pdev_set_param(ar, param, + txpower, ar->pdev->pdev_id); + if (ret) + goto fail; + ar->txpower_limit_5g = txpower; + } + + return 0; + +fail: + ath12k_warn(ar->ab, "failed to recalc txpower limit %d using pdev param %d: %d\n", + txpower / 2, param, ret); + return ret; +} + +static int ath12k_recalc_rtscts_prot(struct ath12k_vif *arvif) +{ + struct ath12k *ar = arvif->ar; + u32 vdev_param, rts_cts; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + vdev_param = WMI_VDEV_PARAM_ENABLE_RTSCTS; + + /* Enable RTS/CTS protection for sw retries (when legacy stations + * are in BSS) or by default only for second rate series. 
+ * TODO: Check if we need to enable CTS 2 Self in any case + */ + rts_cts = WMI_USE_RTS_CTS; + + if (arvif->num_legacy_stations > 0) + rts_cts |= WMI_RTSCTS_ACROSS_SW_RETRIES << 4; + else + rts_cts |= WMI_RTSCTS_FOR_SECOND_RATESERIES << 4; + + /* Need not send duplicate param value to firmware */ + if (arvif->rtscts_prot_mode == rts_cts) + return 0; + + arvif->rtscts_prot_mode = rts_cts; + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d recalc rts/cts prot %d\n", + arvif->vdev_id, rts_cts); + + ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + vdev_param, rts_cts); + if (ret) + ath12k_warn(ar->ab, "failed to recalculate rts/cts prot for vdev %d: %d\n", + arvif->vdev_id, ret); + + return ret; +} + +static int ath12k_mac_set_kickout(struct ath12k_vif *arvif) +{ + struct ath12k *ar = arvif->ar; + u32 param; + int ret; + + ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_STA_KICKOUT_TH, + ATH12K_KICKOUT_THRESHOLD, + ar->pdev->pdev_id); + if (ret) { + ath12k_warn(ar->ab, "failed to set kickout threshold on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + + param = WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS; + ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, + ATH12K_KEEPALIVE_MIN_IDLE); + if (ret) { + ath12k_warn(ar->ab, "failed to set keepalive minimum idle time on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + + param = WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS; + ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, + ATH12K_KEEPALIVE_MAX_IDLE); + if (ret) { + ath12k_warn(ar->ab, "failed to set keepalive maximum idle time on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + + param = WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS; + ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, + ATH12K_KEEPALIVE_MAX_UNRESPONSIVE); + if (ret) { + ath12k_warn(ar->ab, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + + return 0; +} + +void ath12k_mac_peer_cleanup_all(struct ath12k *ar) +{ + struct ath12k_peer *peer, *tmp; + struct ath12k_base *ab = ar->ab; + + lockdep_assert_held(&ar->conf_mutex); + + spin_lock_bh(&ab->base_lock); + list_for_each_entry_safe(peer, tmp, &ab->peers, list) { + ath12k_dp_rx_peer_tid_cleanup(ar, peer); + list_del(&peer->list); + kfree(peer); + } + spin_unlock_bh(&ab->base_lock); + + ar->num_peers = 0; + ar->num_stations = 0; +} + +static int ath12k_mac_vdev_setup_sync(struct ath12k *ar) +{ + lockdep_assert_held(&ar->conf_mutex); + + if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)) + return -ESHUTDOWN; + + if (!wait_for_completion_timeout(&ar->vdev_setup_done, + ATH12K_VDEV_SETUP_TIMEOUT_HZ)) + return -ETIMEDOUT; + + return ar->last_wmi_vdev_start_status ? 
-EINVAL : 0; +} + +static int ath12k_monitor_vdev_up(struct ath12k *ar, int vdev_id) +{ + int ret; + + ret = ath12k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr); + if (ret) { + ath12k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n", + vdev_id, ret); + return ret; + } + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %i started\n", + vdev_id); + return 0; +} + +static int ath12k_mac_monitor_vdev_start(struct ath12k *ar, int vdev_id, + struct cfg80211_chan_def *chandef) +{ + struct ieee80211_channel *channel; + struct wmi_vdev_start_req_arg arg = {}; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + channel = chandef->chan; + arg.vdev_id = vdev_id; + arg.freq = channel->center_freq; + arg.band_center_freq1 = chandef->center_freq1; + arg.band_center_freq2 = chandef->center_freq2; + arg.mode = ath12k_phymodes[chandef->chan->band][chandef->width]; + arg.chan_radar = !!(channel->flags & IEEE80211_CHAN_RADAR); + + arg.min_power = 0; + arg.max_power = channel->max_power; + arg.max_reg_power = channel->max_reg_power; + arg.max_antenna_gain = channel->max_antenna_gain; + + arg.pref_tx_streams = ar->num_tx_chains; + arg.pref_rx_streams = ar->num_rx_chains; + + arg.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR); + + reinit_completion(&ar->vdev_setup_done); + reinit_completion(&ar->vdev_delete_done); + + ret = ath12k_wmi_vdev_start(ar, &arg, false); + if (ret) { + ath12k_warn(ar->ab, "failed to request monitor vdev %i start: %d\n", + vdev_id, ret); + return ret; + } + + ret = ath12k_mac_vdev_setup_sync(ar); + if (ret) { + ath12k_warn(ar->ab, "failed to synchronize setup for monitor vdev %i start: %d\n", + vdev_id, ret); + return ret; + } + + ret = ath12k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr); + if (ret) { + ath12k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n", + vdev_id, ret); + goto vdev_stop; + } + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %i started\n", + vdev_id); + return 0; + +vdev_stop: + ret = ath12k_wmi_vdev_stop(ar, vdev_id); + if (ret) + ath12k_warn(ar->ab, "failed to stop monitor vdev %i after start failure: %d\n", + vdev_id, ret); + return ret; +} + +static int ath12k_mac_monitor_vdev_stop(struct ath12k *ar) +{ + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + reinit_completion(&ar->vdev_setup_done); + + ret = ath12k_wmi_vdev_stop(ar, ar->monitor_vdev_id); + if (ret) + ath12k_warn(ar->ab, "failed to request monitor vdev %i stop: %d\n", + ar->monitor_vdev_id, ret); + + ret = ath12k_mac_vdev_setup_sync(ar); + if (ret) + ath12k_warn(ar->ab, "failed to synchronize monitor vdev %i stop: %d\n", + ar->monitor_vdev_id, ret); + + ret = ath12k_wmi_vdev_down(ar, ar->monitor_vdev_id); + if (ret) + ath12k_warn(ar->ab, "failed to put down monitor vdev %i: %d\n", + ar->monitor_vdev_id, ret); + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %i stopped\n", + ar->monitor_vdev_id); + return ret; +} + +static int ath12k_mac_monitor_vdev_create(struct ath12k *ar) +{ + struct ath12k_pdev *pdev = ar->pdev; + struct ath12k_wmi_vdev_create_arg arg = {}; + int bit, ret; + u8 tmp_addr[6]; + u16 nss; + + lockdep_assert_held(&ar->conf_mutex); + + if (ar->monitor_vdev_created) + return 0; + + if (ar->ab->free_vdev_map == 0) { + ath12k_warn(ar->ab, "failed to find free vdev id for monitor vdev\n"); + return -ENOMEM; + } + + bit = __ffs64(ar->ab->free_vdev_map); + + ar->monitor_vdev_id = bit; + + arg.if_id = ar->monitor_vdev_id; + arg.type = WMI_VDEV_TYPE_MONITOR; + arg.subtype = WMI_VDEV_SUBTYPE_NONE; + arg.pdev_id = pdev->pdev_id; + arg.if_stats_id = 
ATH12K_INVAL_VDEV_STATS_ID; + + if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) { + arg.chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains; + arg.chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains; + } + + if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) { + arg.chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains; + arg.chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains; + } + + ret = ath12k_wmi_vdev_create(ar, tmp_addr, &arg); + if (ret) { + ath12k_warn(ar->ab, "failed to request monitor vdev %i creation: %d\n", + ar->monitor_vdev_id, ret); + ar->monitor_vdev_id = -1; + return ret; + } + + nss = hweight32(ar->cfg_tx_chainmask) ? : 1; + ret = ath12k_wmi_vdev_set_param_cmd(ar, ar->monitor_vdev_id, + WMI_VDEV_PARAM_NSS, nss); + if (ret) { + ath12k_warn(ar->ab, "failed to set vdev %d chainmask 0x%x, nss %d :%d\n", + ar->monitor_vdev_id, ar->cfg_tx_chainmask, nss, ret); + return ret; + } + + ret = ath12k_mac_txpower_recalc(ar); + if (ret) + return ret; + + ar->allocated_vdev_map |= 1LL << ar->monitor_vdev_id; + ar->ab->free_vdev_map &= ~(1LL << ar->monitor_vdev_id); + ar->num_created_vdevs++; + ar->monitor_vdev_created = true; + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %d created\n", + ar->monitor_vdev_id); + + return 0; +} + +static int ath12k_mac_monitor_vdev_delete(struct ath12k *ar) +{ + int ret; + unsigned long time_left; + + lockdep_assert_held(&ar->conf_mutex); + + if (!ar->monitor_vdev_created) + return 0; + + reinit_completion(&ar->vdev_delete_done); + + ret = ath12k_wmi_vdev_delete(ar, ar->monitor_vdev_id); + if (ret) { + ath12k_warn(ar->ab, "failed to request wmi monitor vdev %i removal: %d\n", + ar->monitor_vdev_id, ret); + return ret; + } + + time_left = wait_for_completion_timeout(&ar->vdev_delete_done, + ATH12K_VDEV_DELETE_TIMEOUT_HZ); + if (time_left == 0) { + ath12k_warn(ar->ab, "Timeout in receiving vdev delete response\n"); + } else { + ar->allocated_vdev_map &= ~(1LL << ar->monitor_vdev_id); + ar->ab->free_vdev_map |= 1LL << (ar->monitor_vdev_id); + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %d deleted\n", + ar->monitor_vdev_id); + ar->num_created_vdevs--; + ar->monitor_vdev_id = -1; + ar->monitor_vdev_created = false; + } + + return ret; +} + +static void +ath12k_mac_get_any_chandef_iter(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *conf, + void *data) +{ + struct cfg80211_chan_def **def = data; + + *def = &conf->def; +} + +static int ath12k_mac_monitor_start(struct ath12k *ar) +{ + struct cfg80211_chan_def *chandef = NULL; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + if (ar->monitor_started) + return 0; + + ieee80211_iter_chan_contexts_atomic(ar->hw, + ath12k_mac_get_any_chandef_iter, + &chandef); + if (!chandef) + return 0; + + ret = ath12k_mac_monitor_vdev_start(ar, ar->monitor_vdev_id, chandef); + if (ret) { + ath12k_warn(ar->ab, "failed to start monitor vdev: %d\n", ret); + ath12k_mac_monitor_vdev_delete(ar); + return ret; + } + + ar->monitor_started = true; + ar->num_started_vdevs++; + ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, false); + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor started ret %d\n", ret); + + return ret; +} + +static int ath12k_mac_monitor_stop(struct ath12k *ar) +{ + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + if (!ar->monitor_started) + return 0; + + ret = ath12k_mac_monitor_vdev_stop(ar); + if (ret) { + ath12k_warn(ar->ab, "failed to stop monitor vdev: %d\n", ret); + return ret; + } + + ar->monitor_started = false; + ar->num_started_vdevs--; + ret = 
ath12k_dp_tx_htt_monitor_mode_ring_config(ar, true); + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor stopped ret %d\n", ret); + return ret; +} + +static int ath12k_mac_op_config(struct ieee80211_hw *hw, u32 changed) +{ + struct ath12k *ar = hw->priv; + struct ieee80211_conf *conf = &hw->conf; + int ret = 0; + + mutex_lock(&ar->conf_mutex); + + if (changed & IEEE80211_CONF_CHANGE_MONITOR) { + ar->monitor_conf_enabled = conf->flags & IEEE80211_CONF_MONITOR; + if (ar->monitor_conf_enabled) { + if (ar->monitor_vdev_created) + goto exit; + ret = ath12k_mac_monitor_vdev_create(ar); + if (ret) + goto exit; + ret = ath12k_mac_monitor_start(ar); + if (ret) + goto err_mon_del; + } else { + if (!ar->monitor_vdev_created) + goto exit; + ret = ath12k_mac_monitor_stop(ar); + if (ret) + goto exit; + ath12k_mac_monitor_vdev_delete(ar); + } + } + +exit: + mutex_unlock(&ar->conf_mutex); + return ret; + +err_mon_del: + ath12k_mac_monitor_vdev_delete(ar); + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif) +{ + struct ath12k *ar = arvif->ar; + struct ath12k_base *ab = ar->ab; + struct ieee80211_hw *hw = ar->hw; + struct ieee80211_vif *vif = arvif->vif; + struct ieee80211_mutable_offsets offs = {}; + struct sk_buff *bcn; + struct ieee80211_mgmt *mgmt; + u8 *ies; + int ret; + + if (arvif->vdev_type != WMI_VDEV_TYPE_AP) + return 0; + + bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0); + if (!bcn) { + ath12k_warn(ab, "failed to get beacon template from mac80211\n"); + return -EPERM; + } + + ies = bcn->data + ieee80211_get_hdrlen_from_skb(bcn); + ies += sizeof(mgmt->u.beacon); + + if (cfg80211_find_ie(WLAN_EID_RSN, ies, (skb_tail_pointer(bcn) - ies))) + arvif->rsnie_present = true; + + if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, + WLAN_OUI_TYPE_MICROSOFT_WPA, + ies, (skb_tail_pointer(bcn) - ies))) + arvif->wpaie_present = true; + + ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn); + + kfree_skb(bcn); + + if (ret) + ath12k_warn(ab, "failed to submit beacon template command: %d\n", + ret); + + return ret; +} + +static void ath12k_control_beaconing(struct ath12k_vif *arvif, + struct ieee80211_bss_conf *info) +{ + struct ath12k *ar = arvif->ar; + int ret; + + lockdep_assert_held(&arvif->ar->conf_mutex); + + if (!info->enable_beacon) { + ret = ath12k_wmi_vdev_down(ar, arvif->vdev_id); + if (ret) + ath12k_warn(ar->ab, "failed to down vdev_id %i: %d\n", + arvif->vdev_id, ret); + + arvif->is_up = false; + return; + } + + /* Install the beacon template to the FW */ + ret = ath12k_mac_setup_bcn_tmpl(arvif); + if (ret) { + ath12k_warn(ar->ab, "failed to update bcn tmpl during vdev up: %d\n", + ret); + return; + } + + arvif->aid = 0; + + ether_addr_copy(arvif->bssid, info->bssid); + + ret = ath12k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, + arvif->bssid); + if (ret) { + ath12k_warn(ar->ab, "failed to bring up vdev %d: %i\n", + arvif->vdev_id, ret); + return; + } + + arvif->is_up = true; + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id); +} + +static void ath12k_peer_assoc_h_basic(struct ath12k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ath12k_wmi_peer_assoc_arg *arg) +{ + struct ath12k_vif *arvif = (void *)vif->drv_priv; + u32 aid; + + lockdep_assert_held(&ar->conf_mutex); + + if (vif->type == NL80211_IFTYPE_STATION) + aid = vif->cfg.aid; + else + aid = sta->aid; + + ether_addr_copy(arg->peer_mac, sta->addr); + arg->vdev_id = arvif->vdev_id; + arg->peer_associd = aid; + 
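/* Note the AID source above: in STA mode it is the association ID the + * AP assigned to us (vif->cfg.aid), while in AP mode it is the AID we + * assigned to the remote station (sta->aid). + */ +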
arg->auth_flag = true; + /* TODO: STA WAR in ath10k for listen interval required? */ + arg->peer_listen_intval = ar->hw->conf.listen_interval; + arg->peer_nss = 1; + arg->peer_caps = vif->bss_conf.assoc_capability; +} + +static void ath12k_peer_assoc_h_crypto(struct ath12k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ath12k_wmi_peer_assoc_arg *arg) +{ + struct ieee80211_bss_conf *info = &vif->bss_conf; + struct cfg80211_chan_def def; + struct cfg80211_bss *bss; + struct ath12k_vif *arvif = (struct ath12k_vif *)vif->drv_priv; + const u8 *rsnie = NULL; + const u8 *wpaie = NULL; + + lockdep_assert_held(&ar->conf_mutex); + + if (WARN_ON(ath12k_mac_vif_chan(vif, &def))) + return; + + bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0, + IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY); + + if (arvif->rsnie_present || arvif->wpaie_present) { + arg->need_ptk_4_way = true; + if (arvif->wpaie_present) + arg->need_gtk_2_way = true; + } else if (bss) { + const struct cfg80211_bss_ies *ies; + + rcu_read_lock(); + rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN); + + ies = rcu_dereference(bss->ies); + + wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, + WLAN_OUI_TYPE_MICROSOFT_WPA, + ies->data, + ies->len); + rcu_read_unlock(); + cfg80211_put_bss(ar->hw->wiphy, bss); + } + + /* FIXME: base on RSN IE/WPA IE is a correct idea? */ + if (rsnie || wpaie) { + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "%s: rsn ie found\n", __func__); + arg->need_ptk_4_way = true; + } + + if (wpaie) { + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "%s: wpa ie found\n", __func__); + arg->need_gtk_2_way = true; + } + + if (sta->mfp) { + /* TODO: Need to check if FW supports PMF? */ + arg->is_pmf_enabled = true; + } + + /* TODO: safe_mode_enabled (bypass 4-way handshake) flag req? 
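+ * (For context: need_ptk_4_way and need_gtk_2_way, set above, are + * understood to make the firmware hold data traffic for this peer + * until the PTK 4-way handshake - and, for WPA, the GTK group-key + * handshake - has completed.)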
*/ +} + +static void ath12k_peer_assoc_h_rates(struct ath12k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ath12k_wmi_peer_assoc_arg *arg) +{ + struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates; + struct cfg80211_chan_def def; + const struct ieee80211_supported_band *sband; + const struct ieee80211_rate *rates; + enum nl80211_band band; + u32 ratemask; + u8 rate; + int i; + + lockdep_assert_held(&ar->conf_mutex); + + if (WARN_ON(ath12k_mac_vif_chan(vif, &def))) + return; + + band = def.chan->band; + sband = ar->hw->wiphy->bands[band]; + ratemask = sta->deflink.supp_rates[band]; + ratemask &= arvif->bitrate_mask.control[band].legacy; + rates = sband->bitrates; + + rateset->num_rates = 0; + + for (i = 0; i < 32; i++, ratemask >>= 1, rates++) { + if (!(ratemask & 1)) + continue; + + rate = ath12k_mac_bitrate_to_rate(rates->bitrate); + rateset->rates[rateset->num_rates] = rate; + rateset->num_rates++; + } +} + +static bool +ath12k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN]) +{ + int nss; + + for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++) + if (ht_mcs_mask[nss]) + return false; + + return true; +} + +static bool +ath12k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX]) +{ + int nss; + + for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) + if (vht_mcs_mask[nss]) + return false; + + return true; +} + +static void ath12k_peer_assoc_h_ht(struct ath12k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ath12k_wmi_peer_assoc_arg *arg) +{ + const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; + struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct cfg80211_chan_def def; + enum nl80211_band band; + const u8 *ht_mcs_mask; + int i, n; + u8 max_nss; + u32 stbc; + + lockdep_assert_held(&ar->conf_mutex); + + if (WARN_ON(ath12k_mac_vif_chan(vif, &def))) + return; + + if (!ht_cap->ht_supported) + return; + + band = def.chan->band; + ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; + + if (ath12k_peer_assoc_h_ht_masked(ht_mcs_mask)) + return; + + arg->ht_flag = true; + + arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + + ht_cap->ampdu_factor)) - 1; + + arg->peer_mpdu_density = + ath12k_parse_mpdudensity(ht_cap->ampdu_density); + + arg->peer_ht_caps = ht_cap->cap; + arg->peer_rate_caps |= WMI_HOST_RC_HT_FLAG; + + if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) + arg->ldpc_flag = true; + + if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) { + arg->bw_40 = true; + arg->peer_rate_caps |= WMI_HOST_RC_CW40_FLAG; + } + + if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) { + if (ht_cap->cap & (IEEE80211_HT_CAP_SGI_20 | + IEEE80211_HT_CAP_SGI_40)) + arg->peer_rate_caps |= WMI_HOST_RC_SGI_FLAG; + } + + if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) { + arg->peer_rate_caps |= WMI_HOST_RC_TX_STBC_FLAG; + arg->stbc_flag = true; + } + + if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) { + stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC; + stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT; + stbc = stbc << WMI_HOST_RC_RX_STBC_FLAG_S; + arg->peer_rate_caps |= stbc; + arg->stbc_flag = true; + } + + if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2]) + arg->peer_rate_caps |= WMI_HOST_RC_TS_FLAG; + else if (ht_cap->mcs.rx_mask[1]) + arg->peer_rate_caps |= WMI_HOST_RC_DS_FLAG; + + for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++) + if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) && + (ht_mcs_mask[i / 8] & 
BIT(i % 8))) { + max_nss = (i / 8) + 1; + arg->peer_ht_rates.rates[n++] = i; + } + + /* This is a workaround for HT-enabled STAs which break the spec + * and have no HT capabilities RX mask (no HT RX MCS map). + * + * As per spec, in section 20.3.5 Modulation and coding scheme (MCS), + * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs. + * + * Firmware asserts if such situation occurs. + */ + if (n == 0) { + arg->peer_ht_rates.num_rates = 8; + for (i = 0; i < arg->peer_ht_rates.num_rates; i++) + arg->peer_ht_rates.rates[i] = i; + } else { + arg->peer_ht_rates.num_rates = n; + arg->peer_nss = min(sta->deflink.rx_nss, max_nss); + } + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n", + arg->peer_mac, + arg->peer_ht_rates.num_rates, + arg->peer_nss); +} + +static int ath12k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss) +{ + switch ((mcs_map >> (2 * nss)) & 0x3) { + case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1; + case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1; + case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1; + } + return 0; +} + +static u16 +ath12k_peer_assoc_h_vht_limit(u16 tx_mcs_set, + const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX]) +{ + int idx_limit; + int nss; + u16 mcs_map; + u16 mcs; + + for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) { + mcs_map = ath12k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) & + vht_mcs_limit[nss]; + + if (mcs_map) + idx_limit = fls(mcs_map) - 1; + else + idx_limit = -1; + + switch (idx_limit) { + case 0: + case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + case 7: + mcs = IEEE80211_VHT_MCS_SUPPORT_0_7; + break; + case 8: + mcs = IEEE80211_VHT_MCS_SUPPORT_0_8; + break; + case 9: + mcs = IEEE80211_VHT_MCS_SUPPORT_0_9; + break; + default: + WARN_ON(1); + fallthrough; + case -1: + mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED; + break; + } + + tx_mcs_set &= ~(0x3 << (nss * 2)); + tx_mcs_set |= mcs << (nss * 2); + } + + return tx_mcs_set; +} + +static void ath12k_peer_assoc_h_vht(struct ath12k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ath12k_wmi_peer_assoc_arg *arg) +{ + const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; + struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct cfg80211_chan_def def; + enum nl80211_band band; + const u16 *vht_mcs_mask; + u16 tx_mcs_map; + u8 ampdu_factor; + u8 max_nss, vht_mcs; + int i; + + if (WARN_ON(ath12k_mac_vif_chan(vif, &def))) + return; + + if (!vht_cap->vht_supported) + return; + + band = def.chan->band; + vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; + + if (ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) + return; + + arg->vht_flag = true; + + /* TODO: similar flags required? */ + arg->vht_capable = true; + + if (def.chan->band == NL80211_BAND_2GHZ) + arg->vht_ng_flag = true; + + arg->peer_vht_caps = vht_cap->cap; + + ampdu_factor = (vht_cap->cap & + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >> + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; + + /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to + * zero in VHT IE. Using it would result in degraded throughput. + * arg->peer_max_mpdu at this point contains HT max_mpdu so keep + * it if VHT max_mpdu is smaller. 
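+ * (With IEEE80211_HT_MAX_AMPDU_FACTOR = 13, the expression below caps + * the A-MPDU length at 2^(13 + ampdu_factor) - 1 octets.)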
+ */ + arg->peer_max_mpdu = max(arg->peer_max_mpdu, + (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR + + ampdu_factor)) - 1); + + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) + arg->bw_80 = true; + + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) + arg->bw_160 = true; + + /* Calculate peer NSS capability from VHT capabilities if STA + * supports VHT. + */ + for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) { + vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >> + (2 * i) & 3; + + if (vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED && + vht_mcs_mask[i]) + max_nss = i + 1; + } + arg->peer_nss = min(sta->deflink.rx_nss, max_nss); + arg->rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest); + arg->rx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map); + arg->tx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.tx_highest); + + tx_mcs_map = __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map); + arg->tx_mcs_set = ath12k_peer_assoc_h_vht_limit(tx_mcs_map, vht_mcs_mask); + + /* On the QCN9274 platform, VHT MCS rates 10 and 11 are enabled by + * default. These rates are not part of the 11ac standard, so + * explicitly disable them in 11ac mode. + */ + arg->tx_mcs_set &= ~IEEE80211_VHT_MCS_SUPPORT_0_11_MASK; + arg->tx_mcs_set |= IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11; + + if ((arg->tx_mcs_set & IEEE80211_VHT_MCS_NOT_SUPPORTED) == + IEEE80211_VHT_MCS_NOT_SUPPORTED) + arg->peer_vht_caps &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; + + /* TODO: Check */ + arg->tx_max_mcs_nss = 0xFF; + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n", + sta->addr, arg->peer_max_mpdu, arg->peer_flags); + + /* TODO: rxnss_override */ +} + +static void ath12k_peer_assoc_h_he(struct ath12k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ath12k_wmi_peer_assoc_arg *arg) +{ + const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; + int i; + u8 ampdu_factor, rx_mcs_80, rx_mcs_160, max_nss; + u16 mcs_160_map, mcs_80_map; + bool support_160; + u16 v; + + if (!he_cap->has_he) + return; + + arg->he_flag = true; + + support_160 = !!(he_cap->he_cap_elem.phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G); + + /* Supported HE-MCS and NSS Set of peer he_cap is intersection with self he_cap */ + mcs_160_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160); + mcs_80_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80); + + if (support_160) { + for (i = 7; i >= 0; i--) { + u8 mcs_160 = (mcs_160_map >> (2 * i)) & 3; + + if (mcs_160 != IEEE80211_HE_MCS_NOT_SUPPORTED) { + rx_mcs_160 = i + 1; + break; + } + } + } + + for (i = 7; i >= 0; i--) { + u8 mcs_80 = (mcs_80_map >> (2 * i)) & 3; + + if (mcs_80 != IEEE80211_HE_MCS_NOT_SUPPORTED) { + rx_mcs_80 = i + 1; + break; + } + } + + if (support_160) + max_nss = min(rx_mcs_80, rx_mcs_160); + else + max_nss = rx_mcs_80; + + arg->peer_nss = min(sta->deflink.rx_nss, max_nss); + + memcpy(&arg->peer_he_cap_macinfo, he_cap->he_cap_elem.mac_cap_info, + sizeof(arg->peer_he_cap_macinfo)); + memcpy(&arg->peer_he_cap_phyinfo, he_cap->he_cap_elem.phy_cap_info, + sizeof(arg->peer_he_cap_phyinfo)); + arg->peer_he_ops = vif->bss_conf.he_oper.params; + + /* the topmost byte is used to indicate BSS color info */ + arg->peer_he_ops &= 0xffffff; + + /* As per section 26.6.1 IEEE Std 802.11ax-2022, if the Max AMPDU + * Exponent Extension in HE cap is zero, use the arg->peer_max_mpdu + * as calculated while parsing VHT caps (if VHT caps is present) + * or HT caps (if VHT caps is not
present). + * + * For a non-zero value of Max AMPDU Exponent Extension in HE MAC caps, + * if a HE STA sends both VHT and HE caps in the assoc request, use + * MAX_AMPDU_LEN_FACTOR as 20 to calculate the max_ampdu length. + * If a HE STA sends HE and HT caps but no VHT cap in the assoc + * request, use MAX_AMPDU_LEN_FACTOR as 16 instead. + */ + ampdu_factor = u8_get_bits(he_cap->he_cap_elem.mac_cap_info[3], + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK); + + if (ampdu_factor) { + if (sta->deflink.vht_cap.vht_supported) + arg->peer_max_mpdu = (1 << (IEEE80211_HE_VHT_MAX_AMPDU_FACTOR + + ampdu_factor)) - 1; + else if (sta->deflink.ht_cap.ht_supported) + arg->peer_max_mpdu = (1 << (IEEE80211_HE_HT_MAX_AMPDU_FACTOR + + ampdu_factor)) - 1; + } + + if (he_cap->he_cap_elem.phy_cap_info[6] & + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { + int bit = 7; + int nss, ru; + + arg->peer_ppet.numss_m1 = he_cap->ppe_thres[0] & + IEEE80211_PPE_THRES_NSS_MASK; + arg->peer_ppet.ru_bit_mask = + (he_cap->ppe_thres[0] & + IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >> + IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS; + + for (nss = 0; nss <= arg->peer_ppet.numss_m1; nss++) { + for (ru = 0; ru < 4; ru++) { + u32 val = 0; + int i; + + if ((arg->peer_ppet.ru_bit_mask & BIT(ru)) == 0) + continue; + for (i = 0; i < 6; i++) { + val >>= 1; + val |= ((he_cap->ppe_thres[bit / 8] >> + (bit % 8)) & 0x1) << 5; + bit++; + } + arg->peer_ppet.ppet16_ppet8_ru3_ru0[nss] |= + val << (ru * 6); + } + } + } + + if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_RES) + arg->twt_responder = true; + if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_REQ) + arg->twt_requester = true; + + switch (sta->deflink.bandwidth) { + case IEEE80211_STA_RX_BW_160: + if (he_cap->he_cap_elem.phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) { + v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80p80); + arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v; + + v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80p80); + arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v; + + arg->peer_he_mcs_count++; + } + v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160); + arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v; + + v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_160); + arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v; + + arg->peer_he_mcs_count++; + fallthrough; + + default: + v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80); + arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v; + + v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80); + arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v; + + arg->peer_he_mcs_count++; + break; + } +} + +static void ath12k_peer_assoc_h_smps(struct ieee80211_sta *sta, + struct ath12k_wmi_peer_assoc_arg *arg) +{ + const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; + int smps; + + if (!ht_cap->ht_supported) + return; + + smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS; + smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; + + switch (smps) { + case WLAN_HT_CAP_SM_PS_STATIC: + arg->static_mimops_flag = true; + break; + case WLAN_HT_CAP_SM_PS_DYNAMIC: + arg->dynamic_mimops_flag = true; + break; + case WLAN_HT_CAP_SM_PS_DISABLED: + arg->spatial_mux_flag = true; + break; + default: + break; + } +} + +static void ath12k_peer_assoc_h_qos(struct ath12k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ath12k_wmi_peer_assoc_arg
*arg) +{ + struct ath12k_vif *arvif = (void *)vif->drv_priv; + + switch (arvif->vdev_type) { + case WMI_VDEV_TYPE_AP: + if (sta->wme) { + /* TODO: Check WME vs QoS */ + arg->is_wme_set = true; + arg->qos_flag = true; + } + + if (sta->wme && sta->uapsd_queues) { + /* TODO: Check WME vs QoS */ + arg->is_wme_set = true; + arg->apsd_flag = true; + arg->peer_rate_caps |= WMI_HOST_RC_UAPSD_FLAG; + } + break; + case WMI_VDEV_TYPE_STA: + if (sta->wme) { + arg->is_wme_set = true; + arg->qos_flag = true; + } + break; + default: + break; + } + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac peer %pM qos %d\n", + sta->addr, arg->qos_flag); +} + +static int ath12k_peer_assoc_qos_ap(struct ath12k *ar, + struct ath12k_vif *arvif, + struct ieee80211_sta *sta) +{ + struct ath12k_wmi_ap_ps_arg arg; + u32 max_sp; + u32 uapsd; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + arg.vdev_id = arvif->vdev_id; + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n", + sta->uapsd_queues, sta->max_sp); + + uapsd = 0; + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) + uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN | + WMI_AP_PS_UAPSD_AC3_TRIGGER_EN; + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) + uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN | + WMI_AP_PS_UAPSD_AC2_TRIGGER_EN; + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) + uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN | + WMI_AP_PS_UAPSD_AC1_TRIGGER_EN; + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) + uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN | + WMI_AP_PS_UAPSD_AC0_TRIGGER_EN; + + max_sp = 0; + if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP) + max_sp = sta->max_sp; + + arg.param = WMI_AP_PS_PEER_PARAM_UAPSD; + arg.value = uapsd; + ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg); + if (ret) + goto err; + + arg.param = WMI_AP_PS_PEER_PARAM_MAX_SP; + arg.value = max_sp; + ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg); + if (ret) + goto err; + + /* TODO: revisit during testing */ + arg.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE; + arg.value = DISABLE_SIFS_RESPONSE_TRIGGER; + ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg); + if (ret) + goto err; + + arg.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD; + arg.value = DISABLE_SIFS_RESPONSE_TRIGGER; + ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg); + if (ret) + goto err; + + return 0; + +err: + ath12k_warn(ar->ab, "failed to set ap ps peer param %d for vdev %i: %d\n", + arg.param, arvif->vdev_id, ret); + return ret; +} + +static bool ath12k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta) +{ + return sta->deflink.supp_rates[NL80211_BAND_2GHZ] >> + ATH12K_MAC_FIRST_OFDM_RATE_IDX; +} + +static enum wmi_phy_mode ath12k_mac_get_phymode_vht(struct ath12k *ar, + struct ieee80211_sta *sta) +{ + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) { + switch (sta->deflink.vht_cap.cap & + IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { + case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: + return MODE_11AC_VHT160; + case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ: + return MODE_11AC_VHT80_80; + default: + /* not sure if this is a valid case? 
*/ + return MODE_11AC_VHT160; + } + } + + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) + return MODE_11AC_VHT80; + + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) + return MODE_11AC_VHT40; + + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) + return MODE_11AC_VHT20; + + return MODE_UNKNOWN; +} + +static enum wmi_phy_mode ath12k_mac_get_phymode_he(struct ath12k *ar, + struct ieee80211_sta *sta) +{ + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) { + if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) + return MODE_11AX_HE160; + else if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) + return MODE_11AX_HE80_80; + /* not sure if this is a valid case? */ + return MODE_11AX_HE160; + } + + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) + return MODE_11AX_HE80; + + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) + return MODE_11AX_HE40; + + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) + return MODE_11AX_HE20; + + return MODE_UNKNOWN; +} + +static void ath12k_peer_assoc_h_phymode(struct ath12k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ath12k_wmi_peer_assoc_arg *arg) +{ + struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct cfg80211_chan_def def; + enum nl80211_band band; + const u8 *ht_mcs_mask; + const u16 *vht_mcs_mask; + enum wmi_phy_mode phymode = MODE_UNKNOWN; + + if (WARN_ON(ath12k_mac_vif_chan(vif, &def))) + return; + + band = def.chan->band; + ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; + vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; + + switch (band) { + case NL80211_BAND_2GHZ: + if (sta->deflink.he_cap.has_he) { + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) + phymode = MODE_11AX_HE80_2G; + else if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) + phymode = MODE_11AX_HE40_2G; + else + phymode = MODE_11AX_HE20_2G; + } else if (sta->deflink.vht_cap.vht_supported && + !ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) { + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) + phymode = MODE_11AC_VHT40; + else + phymode = MODE_11AC_VHT20; + } else if (sta->deflink.ht_cap.ht_supported && + !ath12k_peer_assoc_h_ht_masked(ht_mcs_mask)) { + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) + phymode = MODE_11NG_HT40; + else + phymode = MODE_11NG_HT20; + } else if (ath12k_mac_sta_has_ofdm_only(sta)) { + phymode = MODE_11G; + } else { + phymode = MODE_11B; + } + break; + case NL80211_BAND_5GHZ: + case NL80211_BAND_6GHZ: + /* Check HE first */ + if (sta->deflink.he_cap.has_he) { + phymode = ath12k_mac_get_phymode_he(ar, sta); + } else if (sta->deflink.vht_cap.vht_supported && + !ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) { + phymode = ath12k_mac_get_phymode_vht(ar, sta); + } else if (sta->deflink.ht_cap.ht_supported && + !ath12k_peer_assoc_h_ht_masked(ht_mcs_mask)) { + if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) + phymode = MODE_11NA_HT40; + else + phymode = MODE_11NA_HT20; + } else { + phymode = MODE_11A; + } + break; + default: + break; + } + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac peer %pM phymode %s\n", + sta->addr, ath12k_mac_phymode_str(phymode)); + + arg->peer_phymode = phymode; + WARN_ON(phymode == MODE_UNKNOWN); +} + +static void ath12k_peer_assoc_prepare(struct ath12k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ath12k_wmi_peer_assoc_arg *arg, + bool reassoc) +{ + lockdep_assert_held(&ar->conf_mutex); + 
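+ /* Several of the ath12k_peer_assoc_h_*() helpers below (HT, VHT, HE,
+ * SMPS) return early when the peer lacks the corresponding capability,
+ * so zeroing the whole arg first keeps those fields disabled rather
+ * than stale across (re)associations.
+ */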
+ memset(arg, 0, sizeof(*arg)); + + reinit_completion(&ar->peer_assoc_done); + + arg->peer_new_assoc = !reassoc; + ath12k_peer_assoc_h_basic(ar, vif, sta, arg); + ath12k_peer_assoc_h_crypto(ar, vif, sta, arg); + ath12k_peer_assoc_h_rates(ar, vif, sta, arg); + ath12k_peer_assoc_h_ht(ar, vif, sta, arg); + ath12k_peer_assoc_h_vht(ar, vif, sta, arg); + ath12k_peer_assoc_h_he(ar, vif, sta, arg); + ath12k_peer_assoc_h_qos(ar, vif, sta, arg); + ath12k_peer_assoc_h_phymode(ar, vif, sta, arg); + ath12k_peer_assoc_h_smps(sta, arg); + + /* TODO: amsdu_disable req? */ +} + +static int ath12k_setup_peer_smps(struct ath12k *ar, struct ath12k_vif *arvif, + const u8 *addr, + const struct ieee80211_sta_ht_cap *ht_cap) +{ + int smps; + + if (!ht_cap->ht_supported) + return 0; + + smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS; + smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; + + if (smps >= ARRAY_SIZE(ath12k_smps_map)) + return -EINVAL; + + return ath12k_wmi_set_peer_param(ar, addr, arvif->vdev_id, + WMI_PEER_MIMO_PS_STATE, + ath12k_smps_map[smps]); +} + +static void ath12k_bss_assoc(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf) +{ + struct ath12k *ar = hw->priv; + struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_wmi_peer_assoc_arg peer_arg; + struct ieee80211_sta *ap_sta; + struct ath12k_peer *peer; + bool is_auth = false; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n", + arvif->vdev_id, arvif->bssid, arvif->aid); + + rcu_read_lock(); + + ap_sta = ieee80211_find_sta(vif, bss_conf->bssid); + if (!ap_sta) { + ath12k_warn(ar->ab, "failed to find station entry for bss %pM vdev %i\n", + bss_conf->bssid, arvif->vdev_id); + rcu_read_unlock(); + return; + } + + ath12k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg, false); + + rcu_read_unlock(); + + ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg); + if (ret) { + ath12k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n", + bss_conf->bssid, arvif->vdev_id, ret); + return; + } + + if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) { + ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n", + bss_conf->bssid, arvif->vdev_id); + return; + } + + ret = ath12k_setup_peer_smps(ar, arvif, bss_conf->bssid, + &ap_sta->deflink.ht_cap); + if (ret) { + ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n", + arvif->vdev_id, ret); + return; + } + + WARN_ON(arvif->is_up); + + arvif->aid = vif->cfg.aid; + ether_addr_copy(arvif->bssid, bss_conf->bssid); + + ret = ath12k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid); + if (ret) { + ath12k_warn(ar->ab, "failed to set vdev %d up: %d\n", + arvif->vdev_id, ret); + return; + } + + arvif->is_up = true; + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, + "mac vdev %d up (associated) bssid %pM aid %d\n", + arvif->vdev_id, bss_conf->bssid, vif->cfg.aid); + + spin_lock_bh(&ar->ab->base_lock); + + peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arvif->bssid); + if (peer && peer->is_authorized) + is_auth = true; + + spin_unlock_bh(&ar->ab->base_lock); + + /* Authorize BSS Peer */ + if (is_auth) { + ret = ath12k_wmi_set_peer_param(ar, arvif->bssid, + arvif->vdev_id, + WMI_PEER_AUTHORIZE, + 1); + if (ret) + ath12k_warn(ar->ab, "Unable to authorize BSS peer: %d\n", ret); + } + + ret = ath12k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id, + &bss_conf->he_obss_pd); + if (ret) + ath12k_warn(ar->ab, "failed to set vdev %i OBSS PD parameters: %d\n", + 
arvif->vdev_id, ret);
+}
+
+static void ath12k_bss_disassoc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
+ arvif->vdev_id, arvif->bssid);
+
+ ret = ath12k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to down vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_up = false;
+
+ /* TODO: cancel connection_loss_work */
+}
+
+static int ath12k_mac_get_rate_hw_value(int bitrate)
+{
+ u32 preamble;
+ u16 hw_value;
+ int rate;
+ size_t i;
+
+ if (ath12k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
+
+ for (i = 0; i < ARRAY_SIZE(ath12k_legacy_rates); i++) {
+ if (ath12k_legacy_rates[i].bitrate != bitrate)
+ continue;
+
+ hw_value = ath12k_legacy_rates[i].hw_value;
+ rate = ATH12K_HW_RATE_CODE(hw_value, 0, preamble);
+
+ return rate;
+ }
+
+ return -EINVAL;
+}
+
+static void ath12k_recalculate_mgmt_rate(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *def)
+{
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ const struct ieee80211_supported_band *sband;
+ u8 basic_rate_idx;
+ int hw_rate_code;
+ u32 vdev_param;
+ u16 bitrate;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ sband = ar->hw->wiphy->bands[def->chan->band];
+ basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
+ bitrate = sband->bitrates[basic_rate_idx].bitrate;
+
+ hw_rate_code = ath12k_mac_get_rate_hw_value(bitrate);
+ if (hw_rate_code < 0) {
+ ath12k_warn(ar->ab, "bitrate not supported %d\n", bitrate);
+ return;
+ }
+
+ vdev_param = WMI_VDEV_PARAM_MGMT_RATE;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
+ hw_rate_code);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to set mgmt tx rate %d\n", ret);
+
+ vdev_param = WMI_VDEV_PARAM_BEACON_RATE;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
+ hw_rate_code);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to set beacon tx rate %d\n", ret);
+}
+
+static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif,
+ struct ieee80211_bss_conf *info)
+{
+ struct ath12k *ar = arvif->ar;
+ struct sk_buff *tmpl;
+ int ret;
+ u32 interval;
+ bool unsol_bcast_probe_resp_enabled = false;
+
+ if (info->fils_discovery.max_interval) {
+ interval = info->fils_discovery.max_interval;
+
+ tmpl = ieee80211_get_fils_discovery_tmpl(ar->hw, arvif->vif);
+ if (tmpl)
+ ret = ath12k_wmi_fils_discovery_tmpl(ar, arvif->vdev_id,
+ tmpl);
+ } else if (info->unsol_bcast_probe_resp_interval) {
+ unsol_bcast_probe_resp_enabled = true;
+ interval = info->unsol_bcast_probe_resp_interval;
+
+ tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(ar->hw,
+ arvif->vif);
+ if (tmpl)
+ ret = ath12k_wmi_probe_resp_tmpl(ar, arvif->vdev_id,
+ tmpl);
+ } else { /* Disable */
+ return ath12k_wmi_fils_discovery(ar, arvif->vdev_id, 0, false);
+ }
+
+ if (!tmpl) {
+ ath12k_warn(ar->ab,
+ "mac vdev %i failed to retrieve %s template\n",
+ arvif->vdev_id, (unsol_bcast_probe_resp_enabled ?
+ "unsolicited broadcast probe response" : + "FILS discovery")); + return -EPERM; + } + kfree_skb(tmpl); + + if (!ret) + ret = ath12k_wmi_fils_discovery(ar, arvif->vdev_id, interval, + unsol_bcast_probe_resp_enabled); + + return ret; +} + +static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, + u64 changed) +{ + struct ath12k *ar = hw->priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct cfg80211_chan_def def; + u32 param_id, param_value; + enum nl80211_band band; + u32 vdev_param; + int mcast_rate; + u32 preamble; + u16 hw_value; + u16 bitrate; + int ret; + u8 rateidx; + u32 rate; + + mutex_lock(&ar->conf_mutex); + + if (changed & BSS_CHANGED_BEACON_INT) { + arvif->beacon_interval = info->beacon_int; + + param_id = WMI_VDEV_PARAM_BEACON_INTERVAL; + ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + param_id, + arvif->beacon_interval); + if (ret) + ath12k_warn(ar->ab, "Failed to set beacon interval for VDEV: %d\n", + arvif->vdev_id); + else + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, + "Beacon interval: %d set for VDEV: %d\n", + arvif->beacon_interval, arvif->vdev_id); + } + + if (changed & BSS_CHANGED_BEACON) { + param_id = WMI_PDEV_PARAM_BEACON_TX_MODE; + param_value = WMI_BEACON_STAGGERED_MODE; + ret = ath12k_wmi_pdev_set_param(ar, param_id, + param_value, ar->pdev->pdev_id); + if (ret) + ath12k_warn(ar->ab, "Failed to set beacon mode for VDEV: %d\n", + arvif->vdev_id); + else + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, + "Set staggered beacon mode for VDEV: %d\n", + arvif->vdev_id); + + ret = ath12k_mac_setup_bcn_tmpl(arvif); + if (ret) + ath12k_warn(ar->ab, "failed to update bcn template: %d\n", + ret); + } + + if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) { + arvif->dtim_period = info->dtim_period; + + param_id = WMI_VDEV_PARAM_DTIM_PERIOD; + ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + param_id, + arvif->dtim_period); + + if (ret) + ath12k_warn(ar->ab, "Failed to set dtim period for VDEV %d: %i\n", + arvif->vdev_id, ret); + else + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, + "DTIM period: %d set for VDEV: %d\n", + arvif->dtim_period, arvif->vdev_id); + } + + if (changed & BSS_CHANGED_SSID && + vif->type == NL80211_IFTYPE_AP) { + arvif->u.ap.ssid_len = vif->cfg.ssid_len; + if (vif->cfg.ssid_len) + memcpy(arvif->u.ap.ssid, vif->cfg.ssid, vif->cfg.ssid_len); + arvif->u.ap.hidden_ssid = info->hidden_ssid; + } + + if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid)) + ether_addr_copy(arvif->bssid, info->bssid); + + if (changed & BSS_CHANGED_BEACON_ENABLED) { + ath12k_control_beaconing(arvif, info); + + if (arvif->is_up && vif->bss_conf.he_support && + vif->bss_conf.he_oper.params) { + /* TODO: Extend to support 1024 BA Bitmap size */ + ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + WMI_VDEV_PARAM_BA_MODE, + WMI_BA_MODE_BUFFER_SIZE_256); + if (ret) + ath12k_warn(ar->ab, + "failed to set BA BUFFER SIZE 256 for vdev: %d\n", + arvif->vdev_id); + + param_id = WMI_VDEV_PARAM_HEOPS_0_31; + param_value = vif->bss_conf.he_oper.params; + ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + param_id, param_value); + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, + "he oper param: %x set for VDEV: %d\n", + param_value, arvif->vdev_id); + + if (ret) + ath12k_warn(ar->ab, "Failed to set he oper params %x for VDEV %d: %i\n", + param_value, arvif->vdev_id, ret); + } + } + + if (changed & BSS_CHANGED_ERP_CTS_PROT) { + u32 cts_prot; + + cts_prot = !!(info->use_cts_prot); + 
param_id = WMI_VDEV_PARAM_PROTECTION_MODE; + + if (arvif->is_started) { + ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + param_id, cts_prot); + if (ret) + ath12k_warn(ar->ab, "Failed to set CTS prot for VDEV: %d\n", + arvif->vdev_id); + else + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "Set CTS prot: %d for VDEV: %d\n", + cts_prot, arvif->vdev_id); + } else { + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "defer protection mode setup, vdev is not ready yet\n"); + } + } + + if (changed & BSS_CHANGED_ERP_SLOT) { + u32 slottime; + + if (info->use_short_slot) + slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */ + + else + slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */ + + param_id = WMI_VDEV_PARAM_SLOT_TIME; + ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + param_id, slottime); + if (ret) + ath12k_warn(ar->ab, "Failed to set erp slot for VDEV: %d\n", + arvif->vdev_id); + else + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, + "Set slottime: %d for VDEV: %d\n", + slottime, arvif->vdev_id); + } + + if (changed & BSS_CHANGED_ERP_PREAMBLE) { + u32 preamble; + + if (info->use_short_preamble) + preamble = WMI_VDEV_PREAMBLE_SHORT; + else + preamble = WMI_VDEV_PREAMBLE_LONG; + + param_id = WMI_VDEV_PARAM_PREAMBLE; + ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + param_id, preamble); + if (ret) + ath12k_warn(ar->ab, "Failed to set preamble for VDEV: %d\n", + arvif->vdev_id); + else + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, + "Set preamble: %d for VDEV: %d\n", + preamble, arvif->vdev_id); + } + + if (changed & BSS_CHANGED_ASSOC) { + if (vif->cfg.assoc) + ath12k_bss_assoc(hw, vif, info); + else + ath12k_bss_disassoc(hw, vif); + } + + if (changed & BSS_CHANGED_TXPOWER) { + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev_id %i txpower %d\n", + arvif->vdev_id, info->txpower); + + arvif->txpower = info->txpower; + ath12k_mac_txpower_recalc(ar); + } + + if (changed & BSS_CHANGED_MCAST_RATE && + !ath12k_mac_vif_chan(arvif->vif, &def)) { + band = def.chan->band; + mcast_rate = vif->bss_conf.mcast_rate[band]; + + if (mcast_rate > 0) + rateidx = mcast_rate - 1; + else + rateidx = ffs(vif->bss_conf.basic_rates) - 1; + + if (ar->pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) + rateidx += ATH12K_MAC_FIRST_OFDM_RATE_IDX; + + bitrate = ath12k_legacy_rates[rateidx].bitrate; + hw_value = ath12k_legacy_rates[rateidx].hw_value; + + if (ath12k_mac_bitrate_is_cck(bitrate)) + preamble = WMI_RATE_PREAMBLE_CCK; + else + preamble = WMI_RATE_PREAMBLE_OFDM; + + rate = ATH12K_HW_RATE_CODE(hw_value, 0, preamble); + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, + "mac vdev %d mcast_rate %x\n", + arvif->vdev_id, rate); + + vdev_param = WMI_VDEV_PARAM_MCAST_DATA_RATE; + ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + vdev_param, rate); + if (ret) + ath12k_warn(ar->ab, + "failed to set mcast rate on vdev %i: %d\n", + arvif->vdev_id, ret); + + vdev_param = WMI_VDEV_PARAM_BCAST_DATA_RATE; + ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + vdev_param, rate); + if (ret) + ath12k_warn(ar->ab, + "failed to set bcast rate on vdev %i: %d\n", + arvif->vdev_id, ret); + } + + if (changed & BSS_CHANGED_BASIC_RATES && + !ath12k_mac_vif_chan(arvif->vif, &def)) + ath12k_recalculate_mgmt_rate(ar, vif, &def); + + if (changed & BSS_CHANGED_TWT) { + if (info->twt_requester || info->twt_responder) + ath12k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id); + else + ath12k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id); + } + + if (changed & BSS_CHANGED_HE_OBSS_PD) + ath12k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id, + &info->he_obss_pd); + + if (changed & 
BSS_CHANGED_HE_BSS_COLOR) {
+ if (vif->type == NL80211_IFTYPE_AP) {
+ ret = ath12k_wmi_obss_color_cfg_cmd(ar,
+ arvif->vdev_id,
+ info->he_bss_color.color,
+ ATH12K_BSS_COLOR_AP_PERIODS,
+ info->he_bss_color.enabled);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ } else if (vif->type == NL80211_IFTYPE_STATION) {
+ ret = ath12k_wmi_send_bss_color_change_enable_cmd(ar,
+ arvif->vdev_id,
+ 1);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to enable bss color change on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ ret = ath12k_wmi_obss_color_cfg_cmd(ar,
+ arvif->vdev_id,
+ 0,
+ ATH12K_BSS_COLOR_STA_PERIODS,
+ 1);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+ }
+
+ if (changed & BSS_CHANGED_FILS_DISCOVERY ||
+ changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP)
+ ath12k_mac_fils_discovery(arvif, info);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+void __ath12k_mac_scan_finish(struct ath12k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH12K_SCAN_IDLE:
+ break;
+ case ATH12K_SCAN_RUNNING:
+ case ATH12K_SCAN_ABORTING:
+ if (!ar->scan.is_roc) {
+ struct cfg80211_scan_info info = {
+ .aborted = (ar->scan.state ==
+ ATH12K_SCAN_ABORTING),
+ };
+
+ ieee80211_scan_completed(ar->hw, &info);
+ } else if (ar->scan.roc_notify) {
+ ieee80211_remain_on_channel_expired(ar->hw);
+ }
+ fallthrough;
+ case ATH12K_SCAN_STARTING:
+ ar->scan.state = ATH12K_SCAN_IDLE;
+ ar->scan_channel = NULL;
+ ar->scan.roc_freq = 0;
+ cancel_delayed_work(&ar->scan.timeout);
+ complete(&ar->scan.completed);
+ break;
+ }
+}
+
+void ath12k_mac_scan_finish(struct ath12k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ __ath12k_mac_scan_finish(ar);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static int ath12k_scan_stop(struct ath12k *ar)
+{
+ struct ath12k_wmi_scan_cancel_arg arg = {
+ .req_type = WLAN_SCAN_CANCEL_SINGLE,
+ .scan_id = ATH12K_SCAN_ID,
+ };
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* TODO: Fill other STOP Params */
+ arg.pdev_id = ar->pdev->pdev_id;
+
+ ret = ath12k_wmi_send_scan_stop_cmd(ar, &arg);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to stop wmi scan: %d\n", ret);
+ goto out;
+ }
+
+ ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ);
+ if (ret == 0) {
+ ath12k_warn(ar->ab,
+ "failed to receive scan abort completion: timed out\n");
+ ret = -ETIMEDOUT;
+ } else if (ret > 0) {
+ ret = 0;
+ }
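+ /* wait_for_completion_timeout() returns the number of jiffies left
+ * on success and 0 on timeout, hence a positive value is folded to
+ * 0 above and only the timeout case becomes -ETIMEDOUT.
+ */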
+
+out:
+ /* Scan state should be updated upon scan completion but in case
+ * firmware fails to deliver the event (for whatever reason) it is
+ * desired to clean up scan state anyway. Firmware may have just
+ * dropped the scan completion event delivery due to the transport
+ * pipe overflowing with data and/or it can recover on its own before
+ * the next scan request is submitted.
+ */
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.state != ATH12K_SCAN_IDLE)
+ __ath12k_mac_scan_finish(ar);
+ spin_unlock_bh(&ar->data_lock);
+
+ return ret;
+}
+
+static void ath12k_scan_abort(struct ath12k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH12K_SCAN_IDLE:
+ /* This can happen if the timeout worker kicked in and requested
+ * the abort while scan completion was being processed.
+ */
+ break;
+ case ATH12K_SCAN_STARTING:
+ case ATH12K_SCAN_ABORTING:
+ ath12k_warn(ar->ab, "refusing scan abortion due to invalid scan state: %d\n",
+ ar->scan.state);
+ break;
+ case ATH12K_SCAN_RUNNING:
+ ar->scan.state = ATH12K_SCAN_ABORTING;
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = ath12k_scan_stop(ar);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to abort scan: %d\n", ret);
+
+ spin_lock_bh(&ar->data_lock);
+ break;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath12k_scan_timeout_work(struct work_struct *work)
+{
+ struct ath12k *ar = container_of(work, struct ath12k,
+ scan.timeout.work);
+
+ mutex_lock(&ar->conf_mutex);
+ ath12k_scan_abort(ar);
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath12k_start_scan(struct ath12k *ar,
+ struct ath12k_wmi_scan_req_arg *arg)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath12k_wmi_send_scan_start_cmd(ar, arg);
+ if (ret)
+ return ret;
+
+ ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ);
+ if (ret == 0) {
+ ret = ath12k_scan_stop(ar);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to stop scan: %d\n", ret);
+
+ return -ETIMEDOUT;
+ }
+
+ /* If we failed to start the scan, return the error code at this
+ * point. This is probably due to some issue in the firmware, but no
+ * need to wedge the driver due to that...
+ */
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.state == ATH12K_SCAN_IDLE) {
+ spin_unlock_bh(&ar->data_lock);
+ return -EINVAL;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ return 0;
+}
+
+static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct cfg80211_scan_request *req = &hw_req->req;
+ struct ath12k_wmi_scan_req_arg arg = {};
+ int ret;
+ int i;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ switch (ar->scan.state) {
+ case ATH12K_SCAN_IDLE:
+ reinit_completion(&ar->scan.started);
+ reinit_completion(&ar->scan.completed);
+ ar->scan.state = ATH12K_SCAN_STARTING;
+ ar->scan.is_roc = false;
+ ar->scan.vdev_id = arvif->vdev_id;
+ ret = 0;
+ break;
+ case ATH12K_SCAN_STARTING:
+ case ATH12K_SCAN_RUNNING:
+ case ATH12K_SCAN_ABORTING:
+ ret = -EBUSY;
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ if (ret)
+ goto exit;
+
+ ath12k_wmi_start_scan_init(ar, &arg);
+ arg.vdev_id = arvif->vdev_id;
+ arg.scan_id = ATH12K_SCAN_ID;
+
+ if (req->ie_len) {
+ arg.extraie.ptr = kzalloc(req->ie_len, GFP_KERNEL);
+ if (!arg.extraie.ptr) {
+ ret = -ENOMEM;
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.state = ATH12K_SCAN_IDLE;
+ spin_unlock_bh(&ar->data_lock);
+ goto exit;
+ }
+ arg.extraie.len = req->ie_len;
+ memcpy(arg.extraie.ptr, req->ie, req->ie_len);
+ }
+
+ if (req->n_ssids) {
+ arg.num_ssids = req->n_ssids;
+ for (i = 0; i < arg.num_ssids; i++)
+ arg.ssid[i] = req->ssids[i];
+ } else {
+ arg.scan_flags |= WMI_SCAN_FLAG_PASSIVE;
+ }
+
+ if (req->n_channels) {
+ arg.num_chan = req->n_channels;
+ for (i = 0; i < arg.num_chan; i++)
+ arg.chan_list[i] = req->channels[i]->center_freq;
+ }
+
+ ret = ath12k_start_scan(ar, &arg);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to start hw scan: %d\n", ret);
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.state = ATH12K_SCAN_IDLE;
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ /* Add a margin to account for event/command processing */
+ ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
+ msecs_to_jiffies(arg.max_scan_time +
+ ATH12K_MAC_SCAN_TIMEOUT_MSECS));
+
+exit:
+ if (req->ie_len)
+ kfree(arg.extraie.ptr);
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath12k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct
ieee80211_vif *vif) +{ + struct ath12k *ar = hw->priv; + + mutex_lock(&ar->conf_mutex); + ath12k_scan_abort(ar); + mutex_unlock(&ar->conf_mutex); + + cancel_delayed_work_sync(&ar->scan.timeout); +} + +static int ath12k_install_key(struct ath12k_vif *arvif, + struct ieee80211_key_conf *key, + enum set_key_cmd cmd, + const u8 *macaddr, u32 flags) +{ + int ret; + struct ath12k *ar = arvif->ar; + struct wmi_vdev_install_key_arg arg = { + .vdev_id = arvif->vdev_id, + .key_idx = key->keyidx, + .key_len = key->keylen, + .key_data = key->key, + .key_flags = flags, + .macaddr = macaddr, + }; + + lockdep_assert_held(&arvif->ar->conf_mutex); + + reinit_completion(&ar->install_key_done); + + if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) + return 0; + + if (cmd == DISABLE_KEY) { + /* TODO: Check if FW expects value other than NONE for del */ + /* arg.key_cipher = WMI_CIPHER_NONE; */ + arg.key_len = 0; + arg.key_data = NULL; + goto install; + } + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_CCMP: + arg.key_cipher = WMI_CIPHER_AES_CCM; + /* TODO: Re-check if flag is valid */ + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT; + break; + case WLAN_CIPHER_SUITE_TKIP: + arg.key_cipher = WMI_CIPHER_TKIP; + arg.key_txmic_len = 8; + arg.key_rxmic_len = 8; + break; + case WLAN_CIPHER_SUITE_CCMP_256: + arg.key_cipher = WMI_CIPHER_AES_CCM; + break; + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + arg.key_cipher = WMI_CIPHER_AES_GCM; + break; + default: + ath12k_warn(ar->ab, "cipher %d is not supported\n", key->cipher); + return -EOPNOTSUPP; + } + + if (test_bit(ATH12K_FLAG_RAW_MODE, &ar->ab->dev_flags)) + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV | + IEEE80211_KEY_FLAG_RESERVE_TAILROOM; + +install: + ret = ath12k_wmi_vdev_install_key(arvif->ar, &arg); + + if (ret) + return ret; + + if (!wait_for_completion_timeout(&ar->install_key_done, 1 * HZ)) + return -ETIMEDOUT; + + if (ether_addr_equal(macaddr, arvif->vif->addr)) + arvif->key_cipher = key->cipher; + + return ar->install_key_status ? 
-EINVAL : 0;
+}
+
+static int ath12k_clear_peer_keys(struct ath12k_vif *arvif,
+ const u8 *addr)
+{
+ struct ath12k *ar = arvif->ar;
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_peer *peer;
+ int first_errno = 0;
+ int ret;
+ int i;
+ u32 flags = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_peer_find(ab, arvif->vdev_id, addr);
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!peer)
+ return -ENOENT;
+
+ for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+ if (!peer->keys[i])
+ continue;
+
+ /* key flags are not required to delete the key */
+ ret = ath12k_install_key(arvif, peer->keys[i],
+ DISABLE_KEY, addr, flags);
+ if (ret < 0 && first_errno == 0)
+ first_errno = ret;
+
+ if (ret < 0)
+ ath12k_warn(ab, "failed to remove peer key %d: %d\n",
+ i, ret);
+
+ spin_lock_bh(&ab->base_lock);
+ peer->keys[i] = NULL;
+ spin_unlock_bh(&ab->base_lock);
+ }
+
+ return first_errno;
+}
+
+static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_peer *peer;
+ struct ath12k_sta *arsta;
+ const u8 *peer_addr;
+ int ret = 0;
+ u32 flags = 0;
+
+ /* BIP needs to be done in software */
+ if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256)
+ return 1;
+
+ if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
+ return 1;
+
+ if (key->keyidx > WMI_MAX_KEY_INDEX)
+ return -ENOSPC;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (sta)
+ peer_addr = sta->addr;
+ else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ peer_addr = vif->bss_conf.bssid;
+ else
+ peer_addr = vif->addr;
+
+ key->hw_key_idx = key->keyidx;
+
+ /* The peer should not disappear midway (unless the FW goes awry)
+ * since we already hold conf_mutex; just make sure it is there now.
+ */ + spin_lock_bh(&ab->base_lock); + peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr); + spin_unlock_bh(&ab->base_lock); + + if (!peer) { + if (cmd == SET_KEY) { + ath12k_warn(ab, "cannot install key for non-existent peer %pM\n", + peer_addr); + ret = -EOPNOTSUPP; + goto exit; + } else { + /* if the peer doesn't exist there is no key to disable + * anymore + */ + goto exit; + } + } + + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) + flags |= WMI_KEY_PAIRWISE; + else + flags |= WMI_KEY_GROUP; + + ret = ath12k_install_key(arvif, key, cmd, peer_addr, flags); + if (ret) { + ath12k_warn(ab, "ath12k_install_key failed (%d)\n", ret); + goto exit; + } + + ret = ath12k_dp_rx_peer_pn_replay_config(arvif, peer_addr, cmd, key); + if (ret) { + ath12k_warn(ab, "failed to offload PN replay detection %d\n", ret); + goto exit; + } + + spin_lock_bh(&ab->base_lock); + peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr); + if (peer && cmd == SET_KEY) { + peer->keys[key->keyidx] = key; + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { + peer->ucast_keyidx = key->keyidx; + peer->sec_type = ath12k_dp_tx_get_encrypt_type(key->cipher); + } else { + peer->mcast_keyidx = key->keyidx; + peer->sec_type_grp = ath12k_dp_tx_get_encrypt_type(key->cipher); + } + } else if (peer && cmd == DISABLE_KEY) { + peer->keys[key->keyidx] = NULL; + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) + peer->ucast_keyidx = 0; + else + peer->mcast_keyidx = 0; + } else if (!peer) + /* impossible unless FW goes crazy */ + ath12k_warn(ab, "peer %pM disappeared!\n", peer_addr); + + if (sta) { + arsta = (struct ath12k_sta *)sta->drv_priv; + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_TKIP: + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + if (cmd == SET_KEY) + arsta->pn_type = HAL_PN_TYPE_WPA; + else + arsta->pn_type = HAL_PN_TYPE_NONE; + break; + default: + arsta->pn_type = HAL_PN_TYPE_NONE; + break; + } + } + + spin_unlock_bh(&ab->base_lock); + +exit: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static int +ath12k_mac_bitrate_mask_num_vht_rates(struct ath12k *ar, + enum nl80211_band band, + const struct cfg80211_bitrate_mask *mask) +{ + int num_rates = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) + num_rates += hweight16(mask->control[band].vht_mcs[i]); + + return num_rates; +} + +static int +ath12k_mac_set_peer_vht_fixed_rate(struct ath12k_vif *arvif, + struct ieee80211_sta *sta, + const struct cfg80211_bitrate_mask *mask, + enum nl80211_band band) +{ + struct ath12k *ar = arvif->ar; + u8 vht_rate, nss; + u32 rate_code; + int ret, i; + + lockdep_assert_held(&ar->conf_mutex); + + nss = 0; + + for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { + if (hweight16(mask->control[band].vht_mcs[i]) == 1) { + nss = i + 1; + vht_rate = ffs(mask->control[band].vht_mcs[i]) - 1; + } + } + + if (!nss) { + ath12k_warn(ar->ab, "No single VHT Fixed rate found to set for %pM", + sta->addr); + return -EINVAL; + } + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, + "Setting Fixed VHT Rate for peer %pM. 
Device will not switch to any other selected rates", + sta->addr); + + rate_code = ATH12K_HW_RATE_CODE(vht_rate, nss - 1, + WMI_RATE_PREAMBLE_VHT); + ret = ath12k_wmi_set_peer_param(ar, sta->addr, + arvif->vdev_id, + WMI_PEER_PARAM_FIXED_RATE, + rate_code); + if (ret) + ath12k_warn(ar->ab, + "failed to update STA %pM Fixed Rate %d: %d\n", + sta->addr, rate_code, ret); + + return ret; +} + +static int ath12k_station_assoc(struct ath12k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + bool reassoc) +{ + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_wmi_peer_assoc_arg peer_arg; + int ret; + struct cfg80211_chan_def def; + enum nl80211_band band; + struct cfg80211_bitrate_mask *mask; + u8 num_vht_rates; + + lockdep_assert_held(&ar->conf_mutex); + + if (WARN_ON(ath12k_mac_vif_chan(vif, &def))) + return -EPERM; + + band = def.chan->band; + mask = &arvif->bitrate_mask; + + ath12k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc); + + ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg); + if (ret) { + ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n", + sta->addr, arvif->vdev_id, ret); + return ret; + } + + if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) { + ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n", + sta->addr, arvif->vdev_id); + return -ETIMEDOUT; + } + + num_vht_rates = ath12k_mac_bitrate_mask_num_vht_rates(ar, band, mask); + + /* If single VHT rate is configured (by set_bitrate_mask()), + * peer_assoc will disable VHT. This is now enabled by a peer specific + * fixed param. + * Note that all other rates and NSS will be disabled for this peer. + */ + if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) { + ret = ath12k_mac_set_peer_vht_fixed_rate(arvif, sta, mask, + band); + if (ret) + return ret; + } + + /* Re-assoc is run only to update supported rates for given station. It + * doesn't make much sense to reconfigure the peer completely. 
+ */
+ if (reassoc)
+ return 0;
+
+ ret = ath12k_setup_peer_smps(ar, arvif, sta->addr,
+ &sta->deflink.ht_cap);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ if (!sta->wme) {
+ arvif->num_legacy_stations++;
+ ret = ath12k_recalc_rtscts_prot(arvif);
+ if (ret)
+ return ret;
+ }
+
+ if (sta->wme && sta->uapsd_queues) {
+ ret = ath12k_peer_assoc_qos_ap(ar, arvif, sta);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set qos params for STA %pM for vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_station_disassoc(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!sta->wme) {
+ arvif->num_legacy_stations--;
+ ret = ath12k_recalc_rtscts_prot(arvif);
+ if (ret)
+ return ret;
+ }
+
+ ret = ath12k_clear_peer_keys(arvif, sta->addr);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to clear all peer keys for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ return 0;
+}
+
+static void ath12k_sta_rc_update_wk(struct work_struct *wk)
+{
+ struct ath12k *ar;
+ struct ath12k_vif *arvif;
+ struct ath12k_sta *arsta;
+ struct ieee80211_sta *sta;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ u32 changed, bw, nss, smps;
+ int err, num_vht_rates;
+ const struct cfg80211_bitrate_mask *mask;
+ struct ath12k_wmi_peer_assoc_arg peer_arg;
+
+ arsta = container_of(wk, struct ath12k_sta, update_wk);
+ sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
+ arvif = arsta->arvif;
+ ar = arvif->ar;
+
+ if (WARN_ON(ath12k_mac_vif_chan(arvif->vif, &def)))
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ spin_lock_bh(&ar->data_lock);
+
+ changed = arsta->changed;
+ arsta->changed = 0;
+
+ bw = arsta->bw;
+ nss = arsta->nss;
+ smps = arsta->smps;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_lock(&ar->conf_mutex);
+
+ nss = max_t(u32, 1, nss);
+ nss = min(nss, max(ath12k_mac_max_ht_nss(ht_mcs_mask),
+ ath12k_mac_max_vht_nss(vht_mcs_mask)));
+
+ if (changed & IEEE80211_RC_BW_CHANGED) {
+ err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_CHWIDTH, bw);
+ if (err)
+ ath12k_warn(ar->ab, "failed to update STA %pM peer bw %d: %d\n",
+ sta->addr, bw, err);
+ }
+
+ if (changed & IEEE80211_RC_NSS_CHANGED) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac update sta %pM nss %d\n",
+ sta->addr, nss);
+
+ err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_NSS, nss);
+ if (err)
+ ath12k_warn(ar->ab, "failed to update STA %pM nss %d: %d\n",
+ sta->addr, nss, err);
+ }
+
+ if (changed & IEEE80211_RC_SMPS_CHANGED) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac update sta %pM smps %d\n",
+ sta->addr, smps);
+
+ err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_MIMO_PS_STATE, smps);
+ if (err)
+ ath12k_warn(ar->ab, "failed to update STA %pM smps %d: %d\n",
+ sta->addr, smps, err);
+ }
+
+ if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+ mask = &arvif->bitrate_mask;
+ num_vht_rates = ath12k_mac_bitrate_mask_num_vht_rates(ar, band,
+ mask);
+
+ /* Peer_assoc_prepare will reject vht rates in
+ * bitrate_mask if they are not available in range format and
+ * sets vht tx_rateset as unsupported.
+ * So multiple VHT MCS settings (e.g. MCS 4, 5 and 6) per peer
+ * are not supported here. But a single rate in the VHT mask
+ * can be set as a per-peer fixed rate. And even if any HT
+ * rates are configured in the bitrate mask, the device will
+ * not switch to those rates when a per-peer fixed rate is set.
+ * TODO: Check RATEMASK_CMDID to support auto rates selection
+ * across HT/VHT and for multiple VHT MCS support.
+ */
+ if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) {
+ ath12k_mac_set_peer_vht_fixed_rate(arvif, sta, mask,
+ band);
+ } else {
+ /* If the peer is non-VHT or no fixed VHT rate
+ * is provided in the new bitrate mask we set the
+ * other rates using the peer_assoc command.
+ */
+ ath12k_peer_assoc_prepare(ar, arvif->vif, sta,
+ &peer_arg, true);
+
+ err = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ if (err)
+ ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, err);
+
+ if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ))
+ ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
+ sta->addr, arvif->vdev_id);
+ }
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath12k_mac_inc_num_stations(struct ath12k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ struct ath12k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
+ return 0;
+
+ if (ar->num_stations >= ar->max_num_stations)
+ return -ENOBUFS;
+
+ ar->num_stations++;
+
+ return 0;
+}
+
+static void ath12k_mac_dec_num_stations(struct ath12k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ struct ath12k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
+ return;
+
+ ar->num_stations--;
+}
+
+static int ath12k_mac_station_add(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv;
+ struct ath12k_wmi_peer_create_arg peer_param;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath12k_mac_inc_num_stations(arvif, sta);
+ if (ret) {
+ ath12k_warn(ab, "refusing to associate station: too many connected already (%d)\n",
+ ar->max_num_stations);
+ goto exit;
+ }
+
+ arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL);
+ if (!arsta->rx_stats) {
+ ret = -ENOMEM;
+ goto dec_num_station;
+ }
+
+ peer_param.vdev_id = arvif->vdev_id;
+ peer_param.peer_addr = sta->addr;
+ peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
+
+ ret = ath12k_peer_create(ar, arvif, sta, &peer_param);
+ if (ret) {
+ ath12k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ goto free_rx_stats;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "Added peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+
+ if (ieee80211_vif_is_mesh(vif)) {
+ ret = ath12k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_USE_4ADDR, 1);
+ if (ret) {
+ ath12k_warn(ab, "failed to set STA %pM 4addr capability: %d\n",
+ sta->addr, ret);
+ goto free_peer;
+ }
+ }
+
+ ret = ath12k_dp_peer_setup(ar, arvif->vdev_id, sta->addr);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n",
+ sta->addr, arvif->vdev_id, ret);
+ goto free_peer;
+ }
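+
+ /* Some targets (hw_params->vdev_start_delay) require the vdev start
+ * to be deferred until after peer creation; trigger that deferred
+ * start here for non-AP vdevs that have not been started yet.
+ */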
+ if (ab->hw_params->vdev_start_delay &&
+ !arvif->is_started &&
+ arvif->vdev_type != WMI_VDEV_TYPE_AP) {
+ ret = ath12k_start_vdev_delay(ar->hw, vif);
+ if (ret) {
+ ath12k_warn(ab, "failed to delay vdev start: %d\n", ret);
+ goto free_peer;
+ }
+ }
+
+ return 0;
+
+free_peer:
+ ath12k_peer_delete(ar, arvif->vdev_id, sta->addr);
+free_rx_stats:
+ kfree(arsta->rx_stats);
+ arsta->rx_stats = NULL;
+dec_num_station:
+ ath12k_mac_dec_num_stations(arvif, sta);
+exit:
+ return ret;
+}
+
+static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv;
+ struct ath12k_peer *peer;
+ int ret = 0;
+
+ /* cancel must be done outside the mutex to avoid deadlock */
+ if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST))
+ cancel_work_sync(&arsta->update_wk);
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ memset(arsta, 0, sizeof(*arsta));
+ arsta->arvif = arvif;
+ INIT_WORK(&arsta->update_wk, ath12k_sta_rc_update_wk);
+
+ ret = ath12k_mac_station_add(ar, vif, sta);
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ } else if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)) {
+ ath12k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
+
+ ret = ath12k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ else
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+
+ ath12k_mac_dec_num_stations(arvif, sta);
+ spin_lock_bh(&ar->ab->base_lock);
+ peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (peer && peer->sta == sta) {
+ ath12k_warn(ar->ab, "Found peer entry %pM on vdev %i after it was supposedly removed\n",
+ vif->addr, arvif->vdev_id);
+ peer->sta = NULL;
+ list_del(&peer->list);
+ kfree(peer);
+ ar->num_peers--;
+ }
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ kfree(arsta->rx_stats);
+ arsta->rx_stats = NULL;
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ ret = ath12k_station_assoc(ar, vif, sta, false);
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to associate station: %pM\n",
+ sta->addr);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED) {
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (peer)
+ peer->is_authorized = true;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) {
+ ret = ath12k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_AUTHORIZE,
+ 1);
+ if (ret)
+ ath12k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ }
+ } else if (old_state == IEEE80211_STA_AUTHORIZED &&
+ new_state == IEEE80211_STA_ASSOC) {
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (peer)
+ peer->is_authorized = false;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ ret = ath12k_station_disassoc(ar, vif, sta);
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to disassociate station: %pM\n", + sta->addr); + } + + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct ath12k *ar = hw->priv; + struct ath12k_vif *arvif = (void *)vif->drv_priv; + int ret; + s16 txpwr; + + if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC) { + txpwr = 0; + } else { + txpwr = sta->deflink.txpwr.power; + if (!txpwr) + return -EINVAL; + } + + if (txpwr > ATH12K_TX_POWER_MAX_VAL || txpwr < ATH12K_TX_POWER_MIN_VAL) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + + ret = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, + WMI_PEER_USE_FIXED_PWR, txpwr); + if (ret) { + ath12k_warn(ar->ab, "failed to set tx power for station ret: %d\n", + ret); + goto out; + } + +out: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u32 changed) +{ + struct ath12k *ar = hw->priv; + struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv; + struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_peer *peer; + u32 bw, smps; + + spin_lock_bh(&ar->ab->base_lock); + + peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr); + if (!peer) { + spin_unlock_bh(&ar->ab->base_lock); + ath12k_warn(ar->ab, "mac sta rc update failed to find peer %pM on vdev %i\n", + sta->addr, arvif->vdev_id); + return; + } + + spin_unlock_bh(&ar->ab->base_lock); + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, + "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n", + sta->addr, changed, sta->deflink.bandwidth, sta->deflink.rx_nss, + sta->deflink.smps_mode); + + spin_lock_bh(&ar->data_lock); + + if (changed & IEEE80211_RC_BW_CHANGED) { + bw = WMI_PEER_CHWIDTH_20MHZ; + + switch (sta->deflink.bandwidth) { + case IEEE80211_STA_RX_BW_20: + bw = WMI_PEER_CHWIDTH_20MHZ; + break; + case IEEE80211_STA_RX_BW_40: + bw = WMI_PEER_CHWIDTH_40MHZ; + break; + case IEEE80211_STA_RX_BW_80: + bw = WMI_PEER_CHWIDTH_80MHZ; + break; + case IEEE80211_STA_RX_BW_160: + bw = WMI_PEER_CHWIDTH_160MHZ; + break; + default: + ath12k_warn(ar->ab, "Invalid bandwidth %d in rc update for %pM\n", + sta->deflink.bandwidth, sta->addr); + bw = WMI_PEER_CHWIDTH_20MHZ; + break; + } + + arsta->bw = bw; + } + + if (changed & IEEE80211_RC_NSS_CHANGED) + arsta->nss = sta->deflink.rx_nss; + + if (changed & IEEE80211_RC_SMPS_CHANGED) { + smps = WMI_PEER_SMPS_PS_NONE; + + switch (sta->deflink.smps_mode) { + case IEEE80211_SMPS_AUTOMATIC: + case IEEE80211_SMPS_OFF: + smps = WMI_PEER_SMPS_PS_NONE; + break; + case IEEE80211_SMPS_STATIC: + smps = WMI_PEER_SMPS_STATIC; + break; + case IEEE80211_SMPS_DYNAMIC: + smps = WMI_PEER_SMPS_DYNAMIC; + break; + default: + ath12k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM\n", + sta->deflink.smps_mode, sta->addr); + smps = WMI_PEER_SMPS_PS_NONE; + break; + } + + arsta->smps = smps; + } + + arsta->changed |= changed; + + spin_unlock_bh(&ar->data_lock); + + ieee80211_queue_work(hw, &arsta->update_wk); +} + +static int ath12k_conf_tx_uapsd(struct ath12k *ar, struct ieee80211_vif *vif, + u16 ac, bool enable) +{ + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + u32 value; + int ret; + + if (arvif->vdev_type != WMI_VDEV_TYPE_STA) + return 0; + + switch (ac) { + case IEEE80211_AC_VO: + value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN | + WMI_STA_PS_UAPSD_AC3_TRIGGER_EN; + break; + case IEEE80211_AC_VI: + 
value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN | + WMI_STA_PS_UAPSD_AC2_TRIGGER_EN; + break; + case IEEE80211_AC_BE: + value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN | + WMI_STA_PS_UAPSD_AC1_TRIGGER_EN; + break; + case IEEE80211_AC_BK: + value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN | + WMI_STA_PS_UAPSD_AC0_TRIGGER_EN; + break; + } + + if (enable) + arvif->u.sta.uapsd |= value; + else + arvif->u.sta.uapsd &= ~value; + + ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, + WMI_STA_PS_PARAM_UAPSD, + arvif->u.sta.uapsd); + if (ret) { + ath12k_warn(ar->ab, "could not set uapsd params %d\n", ret); + goto exit; + } + + if (arvif->u.sta.uapsd) + value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD; + else + value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; + + ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, + WMI_STA_PS_PARAM_RX_WAKE_POLICY, + value); + if (ret) + ath12k_warn(ar->ab, "could not set rx wake param %d\n", ret); + +exit: + return ret; +} + +static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + unsigned int link_id, u16 ac, + const struct ieee80211_tx_queue_params *params) +{ + struct ath12k *ar = hw->priv; + struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct wmi_wmm_params_arg *p = NULL; + int ret; + + mutex_lock(&ar->conf_mutex); + + switch (ac) { + case IEEE80211_AC_VO: + p = &arvif->wmm_params.ac_vo; + break; + case IEEE80211_AC_VI: + p = &arvif->wmm_params.ac_vi; + break; + case IEEE80211_AC_BE: + p = &arvif->wmm_params.ac_be; + break; + case IEEE80211_AC_BK: + p = &arvif->wmm_params.ac_bk; + break; + } + + if (WARN_ON(!p)) { + ret = -EINVAL; + goto exit; + } + + p->cwmin = params->cw_min; + p->cwmax = params->cw_max; + p->aifs = params->aifs; + p->txop = params->txop; + + ret = ath12k_wmi_send_wmm_update_cmd(ar, arvif->vdev_id, + &arvif->wmm_params); + if (ret) { + ath12k_warn(ar->ab, "failed to set wmm params: %d\n", ret); + goto exit; + } + + ret = ath12k_conf_tx_uapsd(ar, vif, ac, params->uapsd); + + if (ret) + ath12k_warn(ar->ab, "failed to set sta uapsd: %d\n", ret); + +exit: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static struct ieee80211_sta_ht_cap +ath12k_create_ht_cap(struct ath12k *ar, u32 ar_ht_cap, u32 rate_cap_rx_chainmask) +{ + int i; + struct ieee80211_sta_ht_cap ht_cap = {0}; + u32 ar_vht_cap = ar->pdev->cap.vht_cap; + + if (!(ar_ht_cap & WMI_HT_CAP_ENABLED)) + return ht_cap; + + ht_cap.ht_supported = 1; + ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; + ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE; + ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; + ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40; + ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT; + + if (ar_ht_cap & WMI_HT_CAP_HT20_SGI) + ht_cap.cap |= IEEE80211_HT_CAP_SGI_20; + + if (ar_ht_cap & WMI_HT_CAP_HT40_SGI) + ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; + + if (ar_ht_cap & WMI_HT_CAP_DYNAMIC_SMPS) { + u32 smps; + + smps = WLAN_HT_CAP_SM_PS_DYNAMIC; + smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT; + + ht_cap.cap |= smps; + } + + if (ar_ht_cap & WMI_HT_CAP_TX_STBC) + ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC; + + if (ar_ht_cap & WMI_HT_CAP_RX_STBC) { + u32 stbc; + + stbc = ar_ht_cap; + stbc &= WMI_HT_CAP_RX_STBC; + stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT; + stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT; + stbc &= IEEE80211_HT_CAP_RX_STBC; + + ht_cap.cap |= stbc; + } + + if (ar_ht_cap & WMI_HT_CAP_RX_LDPC) + ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; + + if (ar_ht_cap & WMI_HT_CAP_L_SIG_TXOP_PROT) + ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT; + + if (ar_vht_cap & 
WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
+ ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+ for (i = 0; i < ar->num_rx_chains; i++) {
+ if (rate_cap_rx_chainmask & BIT(i))
+ ht_cap.mcs.rx_mask[i] = 0xFF;
+ }
+
+ ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
+
+ return ht_cap;
+}
+
+static int ath12k_mac_set_txbf_conf(struct ath12k_vif *arvif)
+{
+ u32 value = 0;
+ struct ath12k *ar = arvif->ar;
+ int nsts;
+ int sound_dim;
+ u32 vht_cap = ar->pdev->cap.vht_cap;
+ u32 vdev_param = WMI_VDEV_PARAM_TXBF;
+
+ if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) {
+ nsts = vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+ nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
+ }
+
+ if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) {
+ sound_dim = vht_cap &
+ IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+ sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+ if (sound_dim > (ar->num_tx_chains - 1))
+ sound_dim = ar->num_tx_chains - 1;
+ value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET);
+ }
+
+ if (!value)
+ return 0;
+
+ if (vht_cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) {
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+ if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) &&
+ arvif->vdev_type == WMI_VDEV_TYPE_AP)
+ value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
+ }
+
+ if (vht_cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) {
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+ if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) &&
+ arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
+ }
+
+ return ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, value);
+}
+
+static void ath12k_set_vht_txbf_cap(struct ath12k *ar, u32 *vht_cap)
+{
+ bool subfer, subfee;
+ int sound_dim = 0;
+
+ subfer = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE));
+ subfee = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE));
+
+ if (ar->num_tx_chains < 2) {
+ *vht_cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE);
+ subfer = false;
+ }
+
+ /* If SU Beamformer is not set, then disable MU Beamformer Capability */
+ if (!subfer)
+ *vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
+
+ /* If SU Beamformee is not set, then disable MU Beamformee Capability */
+ if (!subfee)
+ *vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
+
+ sound_dim = u32_get_bits(*vht_cap,
+ IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK);
+ *vht_cap = u32_replace_bits(*vht_cap, 0,
+ IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK);
+
+ /* TODO: Need to check invalid STS and Sound_dim values set by FW? */
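+ /* The 3-bit sounding dimensions field encodes (dimensions - 1), so
+ * it is cleared here and, for SU beamformers, written back below
+ * capped at num_tx_chains - 1.
+ */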
*/ + + /* Enable Sounding Dimension Field only if SU BF is enabled */ + if (subfer) { + if (sound_dim > (ar->num_tx_chains - 1)) + sound_dim = ar->num_tx_chains - 1; + + *vht_cap = u32_replace_bits(*vht_cap, sound_dim, + IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK); + } + + /* Use the STS advertised by FW unless SU Beamformee is not supported*/ + if (!subfee) + *vht_cap &= ~(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK); +} + +static struct ieee80211_sta_vht_cap +ath12k_create_vht_cap(struct ath12k *ar, u32 rate_cap_tx_chainmask, + u32 rate_cap_rx_chainmask) +{ + struct ieee80211_sta_vht_cap vht_cap = {0}; + u16 txmcs_map, rxmcs_map; + int i; + + vht_cap.vht_supported = 1; + vht_cap.cap = ar->pdev->cap.vht_cap; + + ath12k_set_vht_txbf_cap(ar, &vht_cap.cap); + + /* TODO: Enable back VHT160 mode once association issues are fixed */ + /* Disabling VHT160 and VHT80+80 modes */ + vht_cap.cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; + vht_cap.cap &= ~IEEE80211_VHT_CAP_SHORT_GI_160; + + rxmcs_map = 0; + txmcs_map = 0; + for (i = 0; i < 8; i++) { + if (i < ar->num_tx_chains && rate_cap_tx_chainmask & BIT(i)) + txmcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2); + else + txmcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2); + + if (i < ar->num_rx_chains && rate_cap_rx_chainmask & BIT(i)) + rxmcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2); + else + rxmcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2); + } + + if (rate_cap_tx_chainmask <= 1) + vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC; + + vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(rxmcs_map); + vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(txmcs_map); + + return vht_cap; +} + +static void ath12k_mac_setup_ht_vht_cap(struct ath12k *ar, + struct ath12k_pdev_cap *cap, + u32 *ht_cap_info) +{ + struct ieee80211_supported_band *band; + u32 rate_cap_tx_chainmask; + u32 rate_cap_rx_chainmask; + u32 ht_cap; + + rate_cap_tx_chainmask = ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift; + rate_cap_rx_chainmask = ar->cfg_rx_chainmask >> cap->rx_chain_mask_shift; + + if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) { + band = &ar->mac.sbands[NL80211_BAND_2GHZ]; + ht_cap = cap->band[NL80211_BAND_2GHZ].ht_cap_info; + if (ht_cap_info) + *ht_cap_info = ht_cap; + band->ht_cap = ath12k_create_ht_cap(ar, ht_cap, + rate_cap_rx_chainmask); + } + + if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP && + (ar->ab->hw_params->single_pdev_only || + !ar->supports_6ghz)) { + band = &ar->mac.sbands[NL80211_BAND_5GHZ]; + ht_cap = cap->band[NL80211_BAND_5GHZ].ht_cap_info; + if (ht_cap_info) + *ht_cap_info = ht_cap; + band->ht_cap = ath12k_create_ht_cap(ar, ht_cap, + rate_cap_rx_chainmask); + band->vht_cap = ath12k_create_vht_cap(ar, rate_cap_tx_chainmask, + rate_cap_rx_chainmask); + } +} + +static int ath12k_check_chain_mask(struct ath12k *ar, u32 ant, bool is_tx_ant) +{ + /* TODO: Check the request chainmask against the supported + * chainmask table which is advertised in extented_service_ready event + */ + + return 0; +} + +static void ath12k_gen_ppe_thresh(struct ath12k_wmi_ppe_threshold_arg *fw_ppet, + u8 *he_ppet) +{ + int nss, ru; + u8 bit = 7; + + he_ppet[0] = fw_ppet->numss_m1 & IEEE80211_PPE_THRES_NSS_MASK; + he_ppet[0] |= (fw_ppet->ru_bit_mask << + IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS) & + IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK; + for (nss = 0; nss <= fw_ppet->numss_m1; nss++) { + for (ru = 0; ru < 4; ru++) { + u8 val; + int i; + + if ((fw_ppet->ru_bit_mask & BIT(ru)) == 0) + continue; + val = (fw_ppet->ppet16_ppet8_ru3_ru0[nss] >> (ru * 6)) & + 0x3f; + val = ((val 
>> 3) & 0x7) | ((val & 0x7) << 3); + for (i = 5; i >= 0; i--) { + he_ppet[bit / 8] |= + ((val >> i) & 0x1) << ((bit % 8)); + bit++; + } + } + } +} + +static void +ath12k_mac_filter_he_cap_mesh(struct ieee80211_he_cap_elem *he_cap_elem) +{ + u8 m; + + m = IEEE80211_HE_MAC_CAP0_TWT_RES | + IEEE80211_HE_MAC_CAP0_TWT_REQ; + he_cap_elem->mac_cap_info[0] &= ~m; + + m = IEEE80211_HE_MAC_CAP2_TRS | + IEEE80211_HE_MAC_CAP2_BCAST_TWT | + IEEE80211_HE_MAC_CAP2_MU_CASCADING; + he_cap_elem->mac_cap_info[2] &= ~m; + + m = IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED | + IEEE80211_HE_MAC_CAP2_BCAST_TWT | + IEEE80211_HE_MAC_CAP2_MU_CASCADING; + he_cap_elem->mac_cap_info[3] &= ~m; + + m = IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG | + IEEE80211_HE_MAC_CAP4_BQR; + he_cap_elem->mac_cap_info[4] &= ~m; + + m = IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECTIVE_TRANSMISSION | + IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU | + IEEE80211_HE_MAC_CAP5_PUNCTURED_SOUNDING | + IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX; + he_cap_elem->mac_cap_info[5] &= ~m; + + m = IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | + IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO; + he_cap_elem->phy_cap_info[2] &= ~m; + + m = IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU | + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK | + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK; + he_cap_elem->phy_cap_info[3] &= ~m; + + m = IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER; + he_cap_elem->phy_cap_info[4] &= ~m; + + m = IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK; + he_cap_elem->phy_cap_info[5] &= ~m; + + m = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU | + IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB | + IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB | + IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO; + he_cap_elem->phy_cap_info[6] &= ~m; + + m = IEEE80211_HE_PHY_CAP7_PSR_BASED_SR | + IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP | + IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ | + IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ; + he_cap_elem->phy_cap_info[7] &= ~m; + + m = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI | + IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G | + IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU | + IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU; + he_cap_elem->phy_cap_info[8] &= ~m; + + m = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM | + IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK | + IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU | + IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU | + IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | + IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB; + he_cap_elem->phy_cap_info[9] &= ~m; +} + +static __le16 ath12k_mac_setup_he_6ghz_cap(struct ath12k_pdev_cap *pcap, + struct ath12k_band_cap *bcap) +{ + u8 val; + + bcap->he_6ghz_capa = IEEE80211_HT_MPDU_DENSITY_NONE; + if (bcap->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) + bcap->he_6ghz_capa |= + u32_encode_bits(WLAN_HT_CAP_SM_PS_DYNAMIC, + IEEE80211_HE_6GHZ_CAP_SM_PS); + else + bcap->he_6ghz_capa |= + u32_encode_bits(WLAN_HT_CAP_SM_PS_DISABLED, + IEEE80211_HE_6GHZ_CAP_SM_PS); + val = u32_get_bits(pcap->vht_cap, + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK); + bcap->he_6ghz_capa |= + u32_encode_bits(val, IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP); + val = u32_get_bits(pcap->vht_cap, + IEEE80211_VHT_CAP_MAX_MPDU_MASK); + bcap->he_6ghz_capa |= + u32_encode_bits(val, IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN); + if (pcap->vht_cap & IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN) + bcap->he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS; + if 
(pcap->vht_cap & IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN) + bcap->he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS; + + return cpu_to_le16(bcap->he_6ghz_capa); +} + +static int ath12k_mac_copy_he_cap(struct ath12k *ar, + struct ath12k_pdev_cap *cap, + struct ieee80211_sband_iftype_data *data, + int band) +{ + int i, idx = 0; + + for (i = 0; i < NUM_NL80211_IFTYPES; i++) { + struct ieee80211_sta_he_cap *he_cap = &data[idx].he_cap; + struct ath12k_band_cap *band_cap = &cap->band[band]; + struct ieee80211_he_cap_elem *he_cap_elem = + &he_cap->he_cap_elem; + + switch (i) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_MESH_POINT: + break; + + default: + continue; + } + + data[idx].types_mask = BIT(i); + he_cap->has_he = true; + memcpy(he_cap_elem->mac_cap_info, band_cap->he_cap_info, + sizeof(he_cap_elem->mac_cap_info)); + memcpy(he_cap_elem->phy_cap_info, band_cap->he_cap_phy_info, + sizeof(he_cap_elem->phy_cap_info)); + + he_cap_elem->mac_cap_info[1] &= + IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK; + + he_cap_elem->phy_cap_info[5] &= + ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK; + he_cap_elem->phy_cap_info[5] &= + ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK; + he_cap_elem->phy_cap_info[5] |= ar->num_tx_chains - 1; + + switch (i) { + case NL80211_IFTYPE_AP: + he_cap_elem->phy_cap_info[3] &= + ~IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK; + he_cap_elem->phy_cap_info[9] |= + IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU; + break; + case NL80211_IFTYPE_STATION: + he_cap_elem->mac_cap_info[0] &= + ~IEEE80211_HE_MAC_CAP0_TWT_RES; + he_cap_elem->mac_cap_info[0] |= + IEEE80211_HE_MAC_CAP0_TWT_REQ; + he_cap_elem->phy_cap_info[9] |= + IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU; + break; + case NL80211_IFTYPE_MESH_POINT: + ath12k_mac_filter_he_cap_mesh(he_cap_elem); + break; + } + + he_cap->he_mcs_nss_supp.rx_mcs_80 = + cpu_to_le16(band_cap->he_mcs & 0xffff); + he_cap->he_mcs_nss_supp.tx_mcs_80 = + cpu_to_le16(band_cap->he_mcs & 0xffff); + he_cap->he_mcs_nss_supp.rx_mcs_160 = + cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff); + he_cap->he_mcs_nss_supp.tx_mcs_160 = + cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff); + he_cap->he_mcs_nss_supp.rx_mcs_80p80 = + cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff); + he_cap->he_mcs_nss_supp.tx_mcs_80p80 = + cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff); + + memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres)); + if (he_cap_elem->phy_cap_info[6] & + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) + ath12k_gen_ppe_thresh(&band_cap->he_ppet, + he_cap->ppe_thres); + + if (band == NL80211_BAND_6GHZ) { + data[idx].he_6ghz_capa.capa = + ath12k_mac_setup_he_6ghz_cap(cap, band_cap); + } + idx++; + } + + return idx; +} + +static void ath12k_mac_setup_he_cap(struct ath12k *ar, + struct ath12k_pdev_cap *cap) +{ + struct ieee80211_supported_band *band; + int count; + + if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) { + count = ath12k_mac_copy_he_cap(ar, cap, + ar->mac.iftype[NL80211_BAND_2GHZ], + NL80211_BAND_2GHZ); + band = &ar->mac.sbands[NL80211_BAND_2GHZ]; + band->iftype_data = ar->mac.iftype[NL80211_BAND_2GHZ]; + band->n_iftype_data = count; + } + + if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP) { + count = ath12k_mac_copy_he_cap(ar, cap, + ar->mac.iftype[NL80211_BAND_5GHZ], + NL80211_BAND_5GHZ); + band = &ar->mac.sbands[NL80211_BAND_5GHZ]; + band->iftype_data = ar->mac.iftype[NL80211_BAND_5GHZ]; + band->n_iftype_data = count; + } + + if (cap->supported_bands & 
WMI_HOST_WLAN_5G_CAP && + ar->supports_6ghz) { + count = ath12k_mac_copy_he_cap(ar, cap, + ar->mac.iftype[NL80211_BAND_6GHZ], + NL80211_BAND_6GHZ); + band = &ar->mac.sbands[NL80211_BAND_6GHZ]; + band->iftype_data = ar->mac.iftype[NL80211_BAND_6GHZ]; + band->n_iftype_data = count; + } +} + +static int __ath12k_set_antenna(struct ath12k *ar, u32 tx_ant, u32 rx_ant) +{ + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + if (ath12k_check_chain_mask(ar, tx_ant, true)) + return -EINVAL; + + if (ath12k_check_chain_mask(ar, rx_ant, false)) + return -EINVAL; + + ar->cfg_tx_chainmask = tx_ant; + ar->cfg_rx_chainmask = rx_ant; + + if (ar->state != ATH12K_STATE_ON && + ar->state != ATH12K_STATE_RESTARTED) + return 0; + + ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_TX_CHAIN_MASK, + tx_ant, ar->pdev->pdev_id); + if (ret) { + ath12k_warn(ar->ab, "failed to set tx-chainmask: %d, req 0x%x\n", + ret, tx_ant); + return ret; + } + + ar->num_tx_chains = hweight32(tx_ant); + + ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_RX_CHAIN_MASK, + rx_ant, ar->pdev->pdev_id); + if (ret) { + ath12k_warn(ar->ab, "failed to set rx-chainmask: %d, req 0x%x\n", + ret, rx_ant); + return ret; + } + + ar->num_rx_chains = hweight32(rx_ant); + + /* Reload HT/VHT/HE capability */ + ath12k_mac_setup_ht_vht_cap(ar, &ar->pdev->cap, NULL); + ath12k_mac_setup_he_cap(ar, &ar->pdev->cap); + + return 0; +} + +int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx) +{ + struct sk_buff *msdu = skb; + struct ieee80211_tx_info *info; + struct ath12k *ar = ctx; + struct ath12k_base *ab = ar->ab; + + spin_lock_bh(&ar->txmgmt_idr_lock); + idr_remove(&ar->txmgmt_idr, buf_id); + spin_unlock_bh(&ar->txmgmt_idr_lock); + dma_unmap_single(ab->dev, ATH12K_SKB_CB(msdu)->paddr, msdu->len, + DMA_TO_DEVICE); + + info = IEEE80211_SKB_CB(msdu); + memset(&info->status, 0, sizeof(info->status)); + + ieee80211_free_txskb(ar->hw, msdu); + + return 0; +} + +static int ath12k_mac_vif_txmgmt_idr_remove(int buf_id, void *skb, void *ctx) +{ + struct ieee80211_vif *vif = ctx; + struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb); + struct sk_buff *msdu = skb; + struct ath12k *ar = skb_cb->ar; + struct ath12k_base *ab = ar->ab; + + if (skb_cb->vif == vif) { + spin_lock_bh(&ar->txmgmt_idr_lock); + idr_remove(&ar->txmgmt_idr, buf_id); + spin_unlock_bh(&ar->txmgmt_idr_lock); + dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, + DMA_TO_DEVICE); + } + + return 0; +} + +static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_vif *arvif, + struct sk_buff *skb) +{ + struct ath12k_base *ab = ar->ab; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_tx_info *info; + dma_addr_t paddr; + int buf_id; + int ret; + + spin_lock_bh(&ar->txmgmt_idr_lock); + buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0, + ATH12K_TX_MGMT_NUM_PENDING_MAX, GFP_ATOMIC); + spin_unlock_bh(&ar->txmgmt_idr_lock); + if (buf_id < 0) + return -ENOSPC; + + info = IEEE80211_SKB_CB(skb); + if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) { + if ((ieee80211_is_action(hdr->frame_control) || + ieee80211_is_deauth(hdr->frame_control) || + ieee80211_is_disassoc(hdr->frame_control)) && + ieee80211_has_protected(hdr->frame_control)) { + skb_put(skb, IEEE80211_CCMP_MIC_LEN); + } + } + + paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(ab->dev, paddr)) { + ath12k_warn(ab, "failed to DMA map mgmt Tx buffer\n"); + ret = -EIO; + goto err_free_idr; + } + + ATH12K_SKB_CB(skb)->paddr = paddr; + + ret = 
ath12k_wmi_mgmt_send(ar, arvif->vdev_id, buf_id, skb); + if (ret) { + ath12k_warn(ar->ab, "failed to send mgmt frame: %d\n", ret); + goto err_unmap_buf; + } + + return 0; + +err_unmap_buf: + dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr, + skb->len, DMA_TO_DEVICE); +err_free_idr: + spin_lock_bh(&ar->txmgmt_idr_lock); + idr_remove(&ar->txmgmt_idr, buf_id); + spin_unlock_bh(&ar->txmgmt_idr_lock); + + return ret; +} + +static void ath12k_mgmt_over_wmi_tx_purge(struct ath12k *ar) +{ + struct sk_buff *skb; + + while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) + ieee80211_free_txskb(ar->hw, skb); +} + +static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work) +{ + struct ath12k *ar = container_of(work, struct ath12k, wmi_mgmt_tx_work); + struct ath12k_skb_cb *skb_cb; + struct ath12k_vif *arvif; + struct sk_buff *skb; + int ret; + + while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) { + skb_cb = ATH12K_SKB_CB(skb); + if (!skb_cb->vif) { + ath12k_warn(ar->ab, "no vif found for mgmt frame\n"); + ieee80211_free_txskb(ar->hw, skb); + continue; + } + + arvif = ath12k_vif_to_arvif(skb_cb->vif); + if (ar->allocated_vdev_map & (1LL << arvif->vdev_id) && + arvif->is_started) { + ret = ath12k_mac_mgmt_tx_wmi(ar, arvif, skb); + if (ret) { + ath12k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n", + arvif->vdev_id, ret); + ieee80211_free_txskb(ar->hw, skb); + } else { + atomic_inc(&ar->num_pending_mgmt_tx); + } + } else { + ath12k_warn(ar->ab, + "dropping mgmt frame for vdev %d, is_started %d\n", + arvif->vdev_id, + arvif->is_started); + ieee80211_free_txskb(ar->hw, skb); + } + } +} + +static int ath12k_mac_mgmt_tx(struct ath12k *ar, struct sk_buff *skb, + bool is_prb_rsp) +{ + struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue; + + if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)) + return -ESHUTDOWN; + + /* Drop probe response packets when the pending management tx + * count has reached a certain threshold, so as to prioritize + * other mgmt packets like auth and assoc to be sent on time + * for establishing successful connections. 
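+	 * As an illustration: once num_pending_mgmt_tx exceeds
+	 * ATH12K_PRB_RSP_DROP_THRESHOLD, new probe responses are rejected
+	 * below with -ENOSPC while auth/assoc frames are still queued.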
+	 */
+	if (is_prb_rsp &&
+	    atomic_read(&ar->num_pending_mgmt_tx) > ATH12K_PRB_RSP_DROP_THRESHOLD) {
+		ath12k_warn(ar->ab,
+			    "dropping probe response as pending queue is almost full\n");
+		return -ENOSPC;
+	}
+
+	if (skb_queue_len(q) == ATH12K_TX_MGMT_NUM_PENDING_MAX) {
+		ath12k_warn(ar->ab, "mgmt tx queue is full\n");
+		return -ENOSPC;
+	}
+
+	skb_queue_tail(q, skb);
+	ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
+
+	return 0;
+}
+
+static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
+			     struct ieee80211_tx_control *control,
+			     struct sk_buff *skb)
+{
+	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
+	struct ath12k *ar = hw->priv;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_vif *vif = info->control.vif;
+	struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ieee80211_key_conf *key = info->control.hw_key;
+	u32 info_flags = info->flags;
+	bool is_prb_rsp;
+	int ret;
+
+	memset(skb_cb, 0, sizeof(*skb_cb));
+	skb_cb->vif = vif;
+
+	if (key) {
+		skb_cb->cipher = key->cipher;
+		skb_cb->flags |= ATH12K_SKB_CIPHER_SET;
+	}
+
+	if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
+		skb_cb->flags |= ATH12K_SKB_HW_80211_ENCAP;
+	} else if (ieee80211_is_mgmt(hdr->frame_control)) {
+		is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
+		ret = ath12k_mac_mgmt_tx(ar, skb, is_prb_rsp);
+		if (ret) {
+			ath12k_warn(ar->ab, "failed to queue management frame %d\n",
+				    ret);
+			ieee80211_free_txskb(ar->hw, skb);
+		}
+		return;
+	}
+
+	ret = ath12k_dp_tx(ar, arvif, skb);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to transmit frame %d\n", ret);
+		ieee80211_free_txskb(ar->hw, skb);
+	}
+}
+
+void ath12k_mac_drain_tx(struct ath12k *ar)
+{
+	/* make sure rcu-protected mac80211 tx path itself is drained */
+	synchronize_net();
+
+	cancel_work_sync(&ar->wmi_mgmt_tx_work);
+	ath12k_mgmt_over_wmi_tx_purge(ar);
+}
+
+static int ath12k_mac_config_mon_status_default(struct ath12k *ar, bool enable)
+{
+	/* TODO: Need to support new monitor mode */
+	return -ENOTSUPP;
+}
+
+static void ath12k_mac_wait_reconfigure(struct ath12k_base *ab)
+{
+	int recovery_start_count;
+
+	if (!ab->is_reset)
+		return;
+
+	recovery_start_count = atomic_inc_return(&ab->recovery_start_count);
+
+	ath12k_dbg(ab, ATH12K_DBG_MAC, "recovery start count %d\n", recovery_start_count);
+
+	if (recovery_start_count == ab->num_radios) {
+		complete(&ab->recovery_start);
+		ath12k_dbg(ab, ATH12K_DBG_MAC, "recovery started successfully\n");
+	}
+
+	ath12k_dbg(ab, ATH12K_DBG_MAC, "waiting for reconfigure to complete...\n");
+
+	wait_for_completion_timeout(&ab->reconfigure_complete,
+				    ATH12K_RECONFIGURE_TIMEOUT_HZ);
+}
+
+static int ath12k_mac_op_start(struct ieee80211_hw *hw)
+{
+	struct ath12k *ar = hw->priv;
+	struct ath12k_base *ab = ar->ab;
+	struct ath12k_pdev *pdev = ar->pdev;
+	int ret;
+
+	ath12k_mac_drain_tx(ar);
+	mutex_lock(&ar->conf_mutex);
+
+	switch (ar->state) {
+	case ATH12K_STATE_OFF:
+		ar->state = ATH12K_STATE_ON;
+		break;
+	case ATH12K_STATE_RESTARTING:
+		ar->state = ATH12K_STATE_RESTARTED;
+		ath12k_mac_wait_reconfigure(ab);
+		break;
+	case ATH12K_STATE_RESTARTED:
+	case ATH12K_STATE_WEDGED:
+	case ATH12K_STATE_ON:
+		WARN_ON(1);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS,
+					1, pdev->pdev_id);
+
+	if (ret) {
+		ath12k_err(ar->ab, "failed to enable PMF QOS: %d\n", ret);
+		goto err;
+	}
+
+	ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 1,
+					pdev->pdev_id);
+	if (ret) {
+		ath12k_err(ar->ab, "failed to enable dynamic bw: %d\n", ret);
+		goto err;
+	}
+
+	ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+					0, pdev->pdev_id);
+	if (ret) {
+		ath12k_err(ab, "failed to set ac override for ARP: %d\n",
+			   ret);
+		goto err;
+	}
+
+	ret = ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(ar, pdev->pdev_id);
+	if (ret) {
+		ath12k_err(ab, "failed to offload radar detection: %d\n",
+			   ret);
+		goto err;
+	}
+
+	ret = ath12k_dp_tx_htt_h2t_ppdu_stats_req(ar,
+						  HTT_PPDU_STATS_TAG_DEFAULT);
+	if (ret) {
+		ath12k_err(ab, "failed to req ppdu stats: %d\n", ret);
+		goto err;
+	}
+
+	ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_MESH_MCAST_ENABLE,
+					1, pdev->pdev_id);
+
+	if (ret) {
+		ath12k_err(ar->ab, "failed to enable mesh mcast: %d\n", ret);
+		goto err;
+	}
+
+	__ath12k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
+
+	/* TODO: Do we need to enable ANI? */
+
+	ath12k_reg_update_chan_list(ar);
+
+	ar->num_started_vdevs = 0;
+	ar->num_created_vdevs = 0;
+	ar->num_peers = 0;
+	ar->allocated_vdev_map = 0;
+
+	/* Configure monitor status ring with default rx_filter to get rx status
+	 * such as rssi, rx_duration.
+	 */
+	ret = ath12k_mac_config_mon_status_default(ar, true);
+	if (ret && (ret != -ENOTSUPP)) {
+		ath12k_err(ab, "failed to configure monitor status ring with default rx_filter: (%d)\n",
+			   ret);
+		goto err;
+	}
+
+	if (ret == -ENOTSUPP)
+		ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+			   "monitor status config is not yet supported\n");
+
+	/* Configure the hash seed for hash based reo dest ring selection */
+	ath12k_wmi_pdev_lro_cfg(ar, ar->pdev->pdev_id);
+
+	/* allow device to enter IMPS */
+	if (ab->hw_params->idle_ps) {
+		ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+						1, pdev->pdev_id);
+		if (ret) {
+			ath12k_err(ab, "failed to enable idle ps: %d\n", ret);
+			goto err;
+		}
+	}
+
+	mutex_unlock(&ar->conf_mutex);
+
+	rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx],
+			   &ab->pdevs[ar->pdev_idx]);
+
+	return 0;
+
+err:
+	ar->state = ATH12K_STATE_OFF;
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+static void ath12k_mac_op_stop(struct ieee80211_hw *hw)
+{
+	struct ath12k *ar = hw->priv;
+	struct htt_ppdu_stats_info *ppdu_stats, *tmp;
+	int ret;
+
+	ath12k_mac_drain_tx(ar);
+
+	mutex_lock(&ar->conf_mutex);
+	ret = ath12k_mac_config_mon_status_default(ar, false);
+	if (ret && (ret != -ENOTSUPP))
+		ath12k_err(ar->ab, "failed to clear rx_filter for monitor status ring: (%d)\n",
+			   ret);
+
+	clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
+	ar->state = ATH12K_STATE_OFF;
+	mutex_unlock(&ar->conf_mutex);
+
+	cancel_delayed_work_sync(&ar->scan.timeout);
+	cancel_work_sync(&ar->regd_update_work);
+
+	spin_lock_bh(&ar->data_lock);
+	list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) {
+		list_del(&ppdu_stats->list);
+		kfree(ppdu_stats);
+	}
+	spin_unlock_bh(&ar->data_lock);
+
+	rcu_assign_pointer(ar->ab->pdevs_active[ar->pdev_idx], NULL);
+
+	synchronize_rcu();
+
+	atomic_set(&ar->num_pending_mgmt_tx, 0);
+}
+
+static u8
+ath12k_mac_get_vdev_stats_id(struct ath12k_vif *arvif)
+{
+	struct ath12k_base *ab = arvif->ar->ab;
+	u8 vdev_stats_id = 0;
+
+	do {
+		if (ab->free_vdev_stats_id_map & (1LL << vdev_stats_id)) {
+			vdev_stats_id++;
+			if (vdev_stats_id <= ATH12K_INVAL_VDEV_STATS_ID) {
+				vdev_stats_id = ATH12K_INVAL_VDEV_STATS_ID;
+				break;
+			}
+		} else {
+			ab->free_vdev_stats_id_map |= (1LL << vdev_stats_id);
+			break;
+		}
+	} while (vdev_stats_id);
+
+	arvif->vdev_stats_id = vdev_stats_id;
+	return vdev_stats_id;
+}
+
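+/* Fill the WMI vdev create argument from the driver state: vdev id, type,
+ * subtype, the owning pdev and the per-band tx/rx chain counts, plus the
+ * vdev stats id reserved above.
+ */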
+static void ath12k_mac_setup_vdev_create_arg(struct ath12k_vif *arvif,
+					     struct ath12k_wmi_vdev_create_arg *arg)
+{
+	struct ath12k *ar = arvif->ar;
+	struct ath12k_pdev *pdev = ar->pdev;
+
+	arg->if_id = arvif->vdev_id;
+	arg->type = arvif->vdev_type;
+	arg->subtype = arvif->vdev_subtype;
+	arg->pdev_id = pdev->pdev_id;
+
+	if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
+		arg->chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
+		arg->chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
+	}
+	if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
+		arg->chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains;
+		arg->chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
+	}
+	if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP &&
+	    ar->supports_6ghz) {
+		arg->chains[NL80211_BAND_6GHZ].tx = ar->num_tx_chains;
+		arg->chains[NL80211_BAND_6GHZ].rx = ar->num_rx_chains;
+	}
+
+	arg->if_stats_id = ath12k_mac_get_vdev_stats_id(arvif);
+}
+
+static u32
+ath12k_mac_prepare_he_mode(struct ath12k_pdev *pdev, u32 viftype)
+{
+	struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
+	struct ath12k_band_cap *cap_band = NULL;
+	u32 *hecap_phy_ptr = NULL;
+	u32 hemode;
+
+	if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP)
+		cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
+	else
+		cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
+
+	hecap_phy_ptr = &cap_band->he_cap_phy_info[0];
+
+	hemode = u32_encode_bits(HE_SU_BFEE_ENABLE, HE_MODE_SU_TX_BFEE) |
+		 u32_encode_bits(HECAP_PHY_SUBFMR_GET(hecap_phy_ptr),
+				 HE_MODE_SU_TX_BFER) |
+		 u32_encode_bits(HECAP_PHY_ULMUMIMO_GET(hecap_phy_ptr),
+				 HE_MODE_UL_MUMIMO);
+
+	/* TODO: WDS and other modes */
+	if (viftype == NL80211_IFTYPE_AP) {
+		hemode |= u32_encode_bits(HECAP_PHY_MUBFMR_GET(hecap_phy_ptr),
+					  HE_MODE_MU_TX_BFER) |
+			  u32_encode_bits(HE_DL_MUOFDMA_ENABLE, HE_MODE_DL_OFDMA) |
+			  u32_encode_bits(HE_UL_MUOFDMA_ENABLE, HE_MODE_UL_OFDMA);
+	} else {
+		hemode |= u32_encode_bits(HE_MU_BFEE_ENABLE, HE_MODE_MU_TX_BFEE);
+	}
+
+	return hemode;
+}
+
+static int ath12k_set_he_mu_sounding_mode(struct ath12k *ar,
+					  struct ath12k_vif *arvif)
+{
+	u32 param_id, param_value;
+	struct ath12k_base *ab = ar->ab;
+	int ret;
+
+	param_id = WMI_VDEV_PARAM_SET_HEMU_MODE;
+	param_value = ath12k_mac_prepare_he_mode(ar->pdev, arvif->vif->type);
+	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+					    param_id, param_value);
+	if (ret) {
+		ath12k_warn(ab, "failed to set vdev %d HE MU mode: %d param_value %x\n",
+			    arvif->vdev_id, ret, param_value);
+		return ret;
+	}
+	param_id = WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE;
+	param_value =
+		u32_encode_bits(HE_VHT_SOUNDING_MODE_ENABLE, HE_VHT_SOUNDING_MODE) |
+		u32_encode_bits(HE_TRIG_NONTRIG_SOUNDING_MODE_ENABLE,
+				HE_TRIG_NONTRIG_SOUNDING_MODE);
+	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+					    param_id, param_value);
+	if (ret) {
+		ath12k_warn(ab, "failed to set vdev %d HE sounding mode: %d\n",
+			    arvif->vdev_id, ret);
+		return ret;
+	}
+	return ret;
+}
+
+static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
+					     struct ieee80211_vif *vif)
+{
+	struct ath12k *ar = hw->priv;
+	struct ath12k_base *ab = ar->ab;
+	struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+	u32 param_id, param_value;
+	int ret;
+
+	param_id = WMI_VDEV_PARAM_TX_ENCAP_TYPE;
+	if (vif->type != NL80211_IFTYPE_STATION &&
+	    vif->type != NL80211_IFTYPE_AP)
+		vif->offload_flags &= ~(IEEE80211_OFFLOAD_ENCAP_ENABLED |
+					IEEE80211_OFFLOAD_DECAP_ENABLED);
+
+	if (vif->offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED)
+		arvif->tx_encap_type = ATH12K_HW_TXRX_ETHERNET;
+	else if 
(test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) + arvif->tx_encap_type = ATH12K_HW_TXRX_RAW; + else + arvif->tx_encap_type = ATH12K_HW_TXRX_NATIVE_WIFI; + + ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + param_id, arvif->tx_encap_type); + if (ret) { + ath12k_warn(ab, "failed to set vdev %d tx encap mode: %d\n", + arvif->vdev_id, ret); + vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED; + } + + param_id = WMI_VDEV_PARAM_RX_DECAP_TYPE; + if (vif->offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED) + param_value = ATH12K_HW_TXRX_ETHERNET; + else if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) + param_value = ATH12K_HW_TXRX_RAW; + else + param_value = ATH12K_HW_TXRX_NATIVE_WIFI; + + ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + param_id, param_value); + if (ret) { + ath12k_warn(ab, "failed to set vdev %d rx decap mode: %d\n", + arvif->vdev_id, ret); + vif->offload_flags &= ~IEEE80211_OFFLOAD_DECAP_ENABLED; + } +} + +static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ath12k *ar = hw->priv; + struct ath12k_base *ab = ar->ab; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_wmi_vdev_create_arg vdev_arg = {0}; + struct ath12k_wmi_peer_create_arg peer_param; + u32 param_id, param_value; + u16 nss; + int i; + int ret; + int bit; + + vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; + + mutex_lock(&ar->conf_mutex); + + if (vif->type == NL80211_IFTYPE_AP && + ar->num_peers > (ar->max_num_peers - 1)) { + ath12k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n"); + ret = -ENOBUFS; + goto err; + } + + if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) { + ath12k_warn(ab, "failed to create vdev, reached max vdev limit %d\n", + TARGET_NUM_VDEVS); + ret = -EBUSY; + goto err; + } + + memset(arvif, 0, sizeof(*arvif)); + + arvif->ar = ar; + arvif->vif = vif; + + INIT_LIST_HEAD(&arvif->list); + + /* Should we initialize any worker to handle connection loss indication + * from firmware in sta mode? 
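+	 * (ath10k, for instance, uses a dedicated connection_loss_work
+	 * for this.)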
+ */ + + for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) { + arvif->bitrate_mask.control[i].legacy = 0xffffffff; + memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff, + sizeof(arvif->bitrate_mask.control[i].ht_mcs)); + memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff, + sizeof(arvif->bitrate_mask.control[i].vht_mcs)); + } + + bit = __ffs64(ab->free_vdev_map); + + arvif->vdev_id = bit; + arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE; + + switch (vif->type) { + case NL80211_IFTYPE_UNSPECIFIED: + case NL80211_IFTYPE_STATION: + arvif->vdev_type = WMI_VDEV_TYPE_STA; + break; + case NL80211_IFTYPE_MESH_POINT: + arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S; + fallthrough; + case NL80211_IFTYPE_AP: + arvif->vdev_type = WMI_VDEV_TYPE_AP; + break; + case NL80211_IFTYPE_MONITOR: + arvif->vdev_type = WMI_VDEV_TYPE_MONITOR; + ar->monitor_vdev_id = bit; + break; + default: + WARN_ON(1); + break; + } + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac add interface id %d type %d subtype %d map %llx\n", + arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype, + ab->free_vdev_map); + + vif->cab_queue = arvif->vdev_id % (ATH12K_HW_MAX_QUEUES - 1); + for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++) + vif->hw_queue[i] = i % (ATH12K_HW_MAX_QUEUES - 1); + + ath12k_mac_setup_vdev_create_arg(arvif, &vdev_arg); + + ret = ath12k_wmi_vdev_create(ar, vif->addr, &vdev_arg); + if (ret) { + ath12k_warn(ab, "failed to create WMI vdev %d: %d\n", + arvif->vdev_id, ret); + goto err; + } + + ar->num_created_vdevs++; + ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM created, vdev_id %d\n", + vif->addr, arvif->vdev_id); + ar->allocated_vdev_map |= 1LL << arvif->vdev_id; + ab->free_vdev_map &= ~(1LL << arvif->vdev_id); + + spin_lock_bh(&ar->data_lock); + list_add(&arvif->list, &ar->arvifs); + spin_unlock_bh(&ar->data_lock); + + ath12k_mac_op_update_vif_offload(hw, vif); + + nss = hweight32(ar->cfg_tx_chainmask) ? 
: 1;
+	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+					    WMI_VDEV_PARAM_NSS, nss);
+	if (ret) {
+		ath12k_warn(ab, "failed to set vdev %d chainmask 0x%x, nss %d: %d\n",
+			    arvif->vdev_id, ar->cfg_tx_chainmask, nss, ret);
+		goto err_vdev_del;
+	}
+
+	switch (arvif->vdev_type) {
+	case WMI_VDEV_TYPE_AP:
+		peer_param.vdev_id = arvif->vdev_id;
+		peer_param.peer_addr = vif->addr;
+		peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
+		ret = ath12k_peer_create(ar, arvif, NULL, &peer_param);
+		if (ret) {
+			ath12k_warn(ab, "failed to create peer for AP on vdev %d: %d\n",
+				    arvif->vdev_id, ret);
+			goto err_vdev_del;
+		}
+
+		ret = ath12k_mac_set_kickout(arvif);
+		if (ret) {
+			ath12k_warn(ar->ab, "failed to set vdev %i kickout parameters: %d\n",
+				    arvif->vdev_id, ret);
+			goto err_peer_del;
+		}
+		break;
+	case WMI_VDEV_TYPE_STA:
+		param_id = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
+		param_value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
+		ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+						  param_id, param_value);
+		if (ret) {
+			ath12k_warn(ar->ab, "failed to set vdev %d RX wake policy: %d\n",
+				    arvif->vdev_id, ret);
+			goto err_peer_del;
+		}
+
+		param_id = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
+		param_value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
+		ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+						  param_id, param_value);
+		if (ret) {
+			ath12k_warn(ar->ab, "failed to set vdev %d TX wake threshold: %d\n",
+				    arvif->vdev_id, ret);
+			goto err_peer_del;
+		}
+
+		param_id = WMI_STA_PS_PARAM_PSPOLL_COUNT;
+		param_value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
+		ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+						  param_id, param_value);
+		if (ret) {
+			ath12k_warn(ar->ab, "failed to set vdev %d pspoll count: %d\n",
+				    arvif->vdev_id, ret);
+			goto err_peer_del;
+		}
+
+		ret = ath12k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, false);
+		if (ret) {
+			ath12k_warn(ar->ab, "failed to disable vdev %d ps mode: %d\n",
+				    arvif->vdev_id, ret);
+			goto err_peer_del;
+		}
+		break;
+	default:
+		break;
+	}
+
+	arvif->txpower = vif->bss_conf.txpower;
+	ret = ath12k_mac_txpower_recalc(ar);
+	if (ret)
+		goto err_peer_del;
+
+	param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
+	param_value = ar->hw->wiphy->rts_threshold;
+	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+					    param_id, param_value);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to set rts threshold for vdev %d: %d\n",
+			    arvif->vdev_id, ret);
+	}
+
+	ath12k_dp_vdev_tx_attach(ar, arvif);
+
+	if (vif->type != NL80211_IFTYPE_MONITOR && ar->monitor_conf_enabled)
+		ath12k_mac_monitor_vdev_create(ar);
+
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+
+err_peer_del:
+	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+		reinit_completion(&ar->peer_delete_done);
+
+		ret = ath12k_wmi_send_peer_delete_cmd(ar, vif->addr,
+						      arvif->vdev_id);
+		if (ret) {
+			ath12k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
+				    arvif->vdev_id, vif->addr);
+			goto err;
+		}
+
+		ret = ath12k_wait_for_peer_delete_done(ar, arvif->vdev_id,
+						       vif->addr);
+		if (ret)
+			goto err;
+
+		ar->num_peers--;
+	}
+
+err_vdev_del:
+	ath12k_wmi_vdev_delete(ar, arvif->vdev_id);
+	ar->num_created_vdevs--;
+	ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
+	ab->free_vdev_map |= 1LL << arvif->vdev_id;
+	ab->free_vdev_stats_id_map &= ~(1LL << arvif->vdev_stats_id);
+	spin_lock_bh(&ar->data_lock);
+	list_del(&arvif->list);
+	spin_unlock_bh(&ar->data_lock);
+
+err:
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+static void ath12k_mac_vif_unref(struct ath12k_dp *dp, struct ieee80211_vif *vif)
+{
+	struct ath12k_tx_desc_info *tx_desc_info, *tmp1;
+	struct ath12k_skb_cb *skb_cb;
+	struct sk_buff *skb;
+	int i;
+
+	for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
+		spin_lock_bh(&dp->tx_desc_lock[i]);
+
+		list_for_each_entry_safe(tx_desc_info, tmp1, &dp->tx_desc_used_list[i],
+					 list) {
+			skb = tx_desc_info->skb;
+			if (!skb)
+				continue;
+
+			skb_cb = ATH12K_SKB_CB(skb);
+			if (skb_cb->vif == vif)
+				skb_cb->vif = NULL;
+		}
+
+		spin_unlock_bh(&dp->tx_desc_lock[i]);
+	}
+}
+
+static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
+					   struct ieee80211_vif *vif)
+{
+	struct ath12k *ar = hw->priv;
+	struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+	struct ath12k_base *ab = ar->ab;
+	unsigned long time_left;
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	ath12k_dbg(ab, ATH12K_DBG_MAC, "mac remove interface (vdev %d)\n",
+		   arvif->vdev_id);
+
+	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+		ret = ath12k_peer_delete(ar, arvif->vdev_id, vif->addr);
+		if (ret)
+			ath12k_warn(ab, "failed to submit AP self-peer removal on vdev %d: %d\n",
+				    arvif->vdev_id, ret);
+	}
+
+	reinit_completion(&ar->vdev_delete_done);
+
+	ret = ath12k_wmi_vdev_delete(ar, arvif->vdev_id);
+	if (ret) {
+		ath12k_warn(ab, "failed to delete WMI vdev %d: %d\n",
+			    arvif->vdev_id, ret);
+		goto err_vdev_del;
+	}
+
+	time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
+						ATH12K_VDEV_DELETE_TIMEOUT_HZ);
+	if (time_left == 0) {
+		ath12k_warn(ab, "Timeout in receiving vdev delete response\n");
+		goto err_vdev_del;
+	}
+
+	if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+		ar->monitor_vdev_id = -1;
+		ar->monitor_vdev_created = false;
+	} else if (ar->monitor_vdev_created && !ar->monitor_started) {
+		ret = ath12k_mac_monitor_vdev_delete(ar);
+	}
+
+	ab->free_vdev_map |= 1LL << (arvif->vdev_id);
+	ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
+	ab->free_vdev_stats_id_map &= ~(1LL << arvif->vdev_stats_id);
+	ar->num_created_vdevs--;
+
+	ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM deleted, vdev_id %d\n",
+		   vif->addr, arvif->vdev_id);
+
+err_vdev_del:
+	spin_lock_bh(&ar->data_lock);
+	list_del(&arvif->list);
+	spin_unlock_bh(&ar->data_lock);
+
+	ath12k_peer_cleanup(ar, arvif->vdev_id);
+
+	idr_for_each(&ar->txmgmt_idr,
+		     ath12k_mac_vif_txmgmt_idr_remove, vif);
+
+	ath12k_mac_vif_unref(&ab->dp, vif);
+	ath12k_dp_tx_put_bank_profile(&ab->dp, arvif->bank_id);
+
+	/* Recalc txpower for remaining vdev */
+	ath12k_mac_txpower_recalc(ar);
+	clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
+
+	/* TODO: recalc traffic pause state based on the available vdevs */
+
+	mutex_unlock(&ar->conf_mutex);
+}
+
+/* FIXME: Has to be verified.
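+ * Each FIF_* flag below still needs to be checked against what the
+ * firmware monitor filter configuration actually honours.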
+ */
+#define SUPPORTED_FILTERS			\
+	(FIF_ALLMULTI |				\
+	FIF_CONTROL |				\
+	FIF_PSPOLL |				\
+	FIF_OTHER_BSS |				\
+	FIF_BCN_PRBRESP_PROMISC |		\
+	FIF_PROBE_REQ |				\
+	FIF_FCSFAIL)
+
+static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw,
+					   unsigned int changed_flags,
+					   unsigned int *total_flags,
+					   u64 multicast)
+{
+	struct ath12k *ar = hw->priv;
+	bool reset_flag;
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	changed_flags &= SUPPORTED_FILTERS;
+	*total_flags &= SUPPORTED_FILTERS;
+	ar->filter_flags = *total_flags;
+
+	/* For monitor mode */
+	reset_flag = !(ar->filter_flags & FIF_BCN_PRBRESP_PROMISC);
+
+	ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, reset_flag);
+	if (!ret) {
+		if (!reset_flag)
+			set_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
+		else
+			clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
+	} else {
+		ath12k_warn(ar->ab,
+			    "failed to set monitor filter: %d\n", ret);
+	}
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+		   "changed_flags:0x%x, total_flags:0x%x, reset_flag:%d\n",
+		   changed_flags, *total_flags, reset_flag);
+
+	mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+{
+	struct ath12k *ar = hw->priv;
+
+	mutex_lock(&ar->conf_mutex);
+
+	*tx_ant = ar->cfg_tx_chainmask;
+	*rx_ant = ar->cfg_rx_chainmask;
+
+	mutex_unlock(&ar->conf_mutex);
+
+	return 0;
+}
+
+static int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+{
+	struct ath12k *ar = hw->priv;
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+	ret = __ath12k_set_antenna(ar, tx_ant, rx_ant);
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw,
+				      struct ieee80211_vif *vif,
+				      struct ieee80211_ampdu_params *params)
+{
+	struct ath12k *ar = hw->priv;
+	int ret = -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+
+	switch (params->action) {
+	case IEEE80211_AMPDU_RX_START:
+		ret = ath12k_dp_rx_ampdu_start(ar, params);
+		break;
+	case IEEE80211_AMPDU_RX_STOP:
+		ret = ath12k_dp_rx_ampdu_stop(ar, params);
+		break;
+	case IEEE80211_AMPDU_TX_START:
+	case IEEE80211_AMPDU_TX_STOP_CONT:
+	case IEEE80211_AMPDU_TX_STOP_FLUSH:
+	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+	case IEEE80211_AMPDU_TX_OPERATIONAL:
+		/* Tx A-MPDU aggregation offloaded to hw/fw so deny mac80211
+		 * Tx aggregation requests.
+		 */
+		ret = -EOPNOTSUPP;
+		break;
+	}
+
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw,
+				     struct ieee80211_chanctx_conf *ctx)
+{
+	struct ath12k *ar = hw->priv;
+	struct ath12k_base *ab = ar->ab;
+
+	ath12k_dbg(ab, ATH12K_DBG_MAC,
+		   "mac chanctx add freq %u width %d ptr %pK\n",
+		   ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+	mutex_lock(&ar->conf_mutex);
+
+	spin_lock_bh(&ar->data_lock);
+	/* TODO: In case of multiple channel context, populate rx_channel from
+	 * Rx PPDU desc information.
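+	 * Until that is available, a single rx_channel per pdev is
+	 * tracked here.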
+	 */
+	ar->rx_channel = ctx->def.chan;
+	spin_unlock_bh(&ar->data_lock);
+
+	mutex_unlock(&ar->conf_mutex);
+
+	return 0;
+}
+
+static void ath12k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
+					 struct ieee80211_chanctx_conf *ctx)
+{
+	struct ath12k *ar = hw->priv;
+	struct ath12k_base *ab = ar->ab;
+
+	ath12k_dbg(ab, ATH12K_DBG_MAC,
+		   "mac chanctx remove freq %u width %d ptr %pK\n",
+		   ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+	mutex_lock(&ar->conf_mutex);
+
+	spin_lock_bh(&ar->data_lock);
+	/* TODO: If there is one more channel context left, populate
+	 * rx_channel with the channel of that remaining channel context.
+	 */
+	ar->rx_channel = NULL;
+	spin_unlock_bh(&ar->data_lock);
+
+	mutex_unlock(&ar->conf_mutex);
+}
+
+static int
+ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif,
+			      const struct cfg80211_chan_def *chandef,
+			      bool restart)
+{
+	struct ath12k *ar = arvif->ar;
+	struct ath12k_base *ab = ar->ab;
+	struct wmi_vdev_start_req_arg arg = {};
+	int he_support = arvif->vif->bss_conf.he_support;
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	reinit_completion(&ar->vdev_setup_done);
+
+	arg.vdev_id = arvif->vdev_id;
+	arg.dtim_period = arvif->dtim_period;
+	arg.bcn_intval = arvif->beacon_interval;
+
+	arg.freq = chandef->chan->center_freq;
+	arg.band_center_freq1 = chandef->center_freq1;
+	arg.band_center_freq2 = chandef->center_freq2;
+	arg.mode = ath12k_phymodes[chandef->chan->band][chandef->width];
+
+	arg.min_power = 0;
+	arg.max_power = chandef->chan->max_power * 2;
+	arg.max_reg_power = chandef->chan->max_reg_power * 2;
+	arg.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
+
+	arg.pref_tx_streams = ar->num_tx_chains;
+	arg.pref_rx_streams = ar->num_rx_chains;
+
+	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+		arg.ssid = arvif->u.ap.ssid;
+		arg.ssid_len = arvif->u.ap.ssid_len;
+		arg.hidden_ssid = arvif->u.ap.hidden_ssid;
+
+		/* For now allow DFS for AP mode */
+		arg.chan_radar = !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
+
+		arg.passive = arg.chan_radar;
+
+		spin_lock_bh(&ab->base_lock);
+		arg.regdomain = ar->ab->dfs_region;
+		spin_unlock_bh(&ab->base_lock);
+
+		/* TODO: Notify if secondary 80Mhz also needs radar detection */
+		if (he_support) {
+			ret = ath12k_set_he_mu_sounding_mode(ar, arvif);
+			if (ret) {
+				ath12k_warn(ar->ab, "failed to set he mode vdev %i\n",
+					    arg.vdev_id);
+				return ret;
+			}
+		}
+	}
+
+	arg.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);
+
+	ath12k_dbg(ab, ATH12K_DBG_MAC,
+		   "mac vdev %d start center_freq %d phymode %s\n",
+		   arg.vdev_id, arg.freq,
+		   ath12k_mac_phymode_str(arg.mode));
+
+	ret = ath12k_wmi_vdev_start(ar, &arg, restart);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to %s WMI vdev %i\n",
+			    restart ? "restart" : "start", arg.vdev_id);
+		return ret;
+	}
+
+	ret = ath12k_mac_vdev_setup_sync(ar);
+	if (ret) {
+		ath12k_warn(ab, "failed to synchronize setup for vdev %i %s: %d\n",
+			    arg.vdev_id, restart ? "restart" : "start", ret);
+		return ret;
+	}
+
+	ar->num_started_vdevs++;
+	ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM started, vdev_id %d\n",
+		   arvif->vif->addr, arvif->vdev_id);
+
+	/* Enable the CAC flag in the driver by checking the channel DFS CAC time,
+	 * i.e. the dfs_cac_ms value, which will be valid only for radar channels,
+	 * and the channel state NL80211_DFS_USABLE, which indicates CAC needs to
+	 * be done before channel usage. This flag is used to drop rx packets
+	 * during CAC.
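+	 *
+	 * For example, an AP vdev starting on a DFS channel with a non-zero
+	 * dfs_cac_ms and NL80211_DFS_USABLE state sets ATH12K_CAC_RUNNING
+	 * below until CAC completes.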
+ */ + /* TODO: Set the flag for other interface types as required */ + if (arvif->vdev_type == WMI_VDEV_TYPE_AP && + chandef->chan->dfs_cac_ms && + chandef->chan->dfs_state == NL80211_DFS_USABLE) { + set_bit(ATH12K_CAC_RUNNING, &ar->dev_flags); + ath12k_dbg(ab, ATH12K_DBG_MAC, + "CAC Started in chan_freq %d for vdev %d\n", + arg.freq, arg.vdev_id); + } + + ret = ath12k_mac_set_txbf_conf(arvif); + if (ret) + ath12k_warn(ab, "failed to set txbf conf for vdev %d: %d\n", + arvif->vdev_id, ret); + + return 0; +} + +static int ath12k_mac_vdev_stop(struct ath12k_vif *arvif) +{ + struct ath12k *ar = arvif->ar; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + reinit_completion(&ar->vdev_setup_done); + + ret = ath12k_wmi_vdev_stop(ar, arvif->vdev_id); + if (ret) { + ath12k_warn(ar->ab, "failed to stop WMI vdev %i: %d\n", + arvif->vdev_id, ret); + goto err; + } + + ret = ath12k_mac_vdev_setup_sync(ar); + if (ret) { + ath12k_warn(ar->ab, "failed to synchronize setup for vdev %i: %d\n", + arvif->vdev_id, ret); + goto err; + } + + WARN_ON(ar->num_started_vdevs == 0); + + ar->num_started_vdevs--; + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n", + arvif->vif->addr, arvif->vdev_id); + + if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) { + clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags); + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "CAC Stopped for vdev %d\n", + arvif->vdev_id); + } + + return 0; +err: + return ret; +} + +static int ath12k_mac_vdev_start(struct ath12k_vif *arvif, + const struct cfg80211_chan_def *chandef) +{ + return ath12k_mac_vdev_start_restart(arvif, chandef, false); +} + +static int ath12k_mac_vdev_restart(struct ath12k_vif *arvif, + const struct cfg80211_chan_def *chandef) +{ + return ath12k_mac_vdev_start_restart(arvif, chandef, true); +} + +struct ath12k_mac_change_chanctx_arg { + struct ieee80211_chanctx_conf *ctx; + struct ieee80211_vif_chanctx_switch *vifs; + int n_vifs; + int next_vif; +}; + +static void +ath12k_mac_change_chanctx_cnt_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct ath12k_mac_change_chanctx_arg *arg = data; + + if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != arg->ctx) + return; + + arg->n_vifs++; +} + +static void +ath12k_mac_change_chanctx_fill_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct ath12k_mac_change_chanctx_arg *arg = data; + struct ieee80211_chanctx_conf *ctx; + + ctx = rcu_access_pointer(vif->bss_conf.chanctx_conf); + if (ctx != arg->ctx) + return; + + if (WARN_ON(arg->next_vif == arg->n_vifs)) + return; + + arg->vifs[arg->next_vif].vif = vif; + arg->vifs[arg->next_vif].old_ctx = ctx; + arg->vifs[arg->next_vif].new_ctx = ctx; + arg->next_vif++; +} + +static void +ath12k_mac_update_vif_chan(struct ath12k *ar, + struct ieee80211_vif_chanctx_switch *vifs, + int n_vifs) +{ + struct ath12k_base *ab = ar->ab; + struct ath12k_vif *arvif; + int ret; + int i; + bool monitor_vif = false; + + lockdep_assert_held(&ar->conf_mutex); + + for (i = 0; i < n_vifs; i++) { + arvif = (void *)vifs[i].vif->drv_priv; + + if (vifs[i].vif->type == NL80211_IFTYPE_MONITOR) + monitor_vif = true; + + ath12k_dbg(ab, ATH12K_DBG_MAC, + "mac chanctx switch vdev_id %i freq %u->%u width %d->%d\n", + arvif->vdev_id, + vifs[i].old_ctx->def.chan->center_freq, + vifs[i].new_ctx->def.chan->center_freq, + vifs[i].old_ctx->def.width, + vifs[i].new_ctx->def.width); + + if (WARN_ON(!arvif->is_started)) + continue; + + if (WARN_ON(!arvif->is_up)) + continue; + + ret = ath12k_wmi_vdev_down(ar, arvif->vdev_id); + if (ret) { + 
ath12k_warn(ab, "failed to down vdev %d: %d\n", + arvif->vdev_id, ret); + continue; + } + } + + /* All relevant vdevs are downed and associated channel resources + * should be available for the channel switch now. + */ + + /* TODO: Update ar->rx_channel */ + + for (i = 0; i < n_vifs; i++) { + arvif = (void *)vifs[i].vif->drv_priv; + + if (WARN_ON(!arvif->is_started)) + continue; + + if (WARN_ON(!arvif->is_up)) + continue; + + ret = ath12k_mac_vdev_restart(arvif, &vifs[i].new_ctx->def); + if (ret) { + ath12k_warn(ab, "failed to restart vdev %d: %d\n", + arvif->vdev_id, ret); + continue; + } + + ret = ath12k_mac_setup_bcn_tmpl(arvif); + if (ret) + ath12k_warn(ab, "failed to update bcn tmpl during csa: %d\n", + ret); + + ret = ath12k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, + arvif->bssid); + if (ret) { + ath12k_warn(ab, "failed to bring vdev up %d: %d\n", + arvif->vdev_id, ret); + continue; + } + } + + /* Restart the internal monitor vdev on new channel */ + if (!monitor_vif && ar->monitor_vdev_created) { + if (!ath12k_mac_monitor_stop(ar)) + ath12k_mac_monitor_start(ar); + } +} + +static void +ath12k_mac_update_active_vif_chan(struct ath12k *ar, + struct ieee80211_chanctx_conf *ctx) +{ + struct ath12k_mac_change_chanctx_arg arg = { .ctx = ctx }; + + lockdep_assert_held(&ar->conf_mutex); + + ieee80211_iterate_active_interfaces_atomic(ar->hw, + IEEE80211_IFACE_ITER_NORMAL, + ath12k_mac_change_chanctx_cnt_iter, + &arg); + if (arg.n_vifs == 0) + return; + + arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]), GFP_KERNEL); + if (!arg.vifs) + return; + + ieee80211_iterate_active_interfaces_atomic(ar->hw, + IEEE80211_IFACE_ITER_NORMAL, + ath12k_mac_change_chanctx_fill_iter, + &arg); + + ath12k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs); + + kfree(arg.vifs); +} + +static void ath12k_mac_op_change_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx, + u32 changed) +{ + struct ath12k *ar = hw->priv; + struct ath12k_base *ab = ar->ab; + + mutex_lock(&ar->conf_mutex); + + ath12k_dbg(ab, ATH12K_DBG_MAC, + "mac chanctx change freq %u width %d ptr %pK changed %x\n", + ctx->def.chan->center_freq, ctx->def.width, ctx, changed); + + /* This shouldn't really happen because channel switching should use + * switch_vif_chanctx(). 
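+	 * A bare IEEE80211_CHANCTX_CHANGE_CHANNEL on a context that is in
+	 * use is therefore treated as a bug (see the WARN_ON below).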
+	 */
+	if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
+		goto unlock;
+
+	if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH)
+		ath12k_mac_update_active_vif_chan(ar, ctx);
+
+	/* TODO: Recalc radar detection */
+
+unlock:
+	mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath12k_start_vdev_delay(struct ieee80211_hw *hw,
+				   struct ieee80211_vif *vif)
+{
+	struct ath12k *ar = hw->priv;
+	struct ath12k_base *ab = ar->ab;
+	struct ath12k_vif *arvif = (void *)vif->drv_priv;
+	int ret;
+
+	if (WARN_ON(arvif->is_started))
+		return -EBUSY;
+
+	ret = ath12k_mac_vdev_start(arvif, &arvif->chanctx.def);
+	if (ret) {
+		ath12k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
+			    arvif->vdev_id, vif->addr,
+			    arvif->chanctx.def.chan->center_freq, ret);
+		return ret;
+	}
+
+	if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+		ret = ath12k_monitor_vdev_up(ar, arvif->vdev_id);
+		if (ret) {
+			ath12k_warn(ab, "failed to put monitor vdev up: %d\n", ret);
+			return ret;
+		}
+	}
+
+	arvif->is_started = true;
+
+	/* TODO: Setup ps and cts/rts protection */
+	return 0;
+}
+
+static int
+ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
+				 struct ieee80211_vif *vif,
+				 struct ieee80211_bss_conf *link_conf,
+				 struct ieee80211_chanctx_conf *ctx)
+{
+	struct ath12k *ar = hw->priv;
+	struct ath12k_base *ab = ar->ab;
+	struct ath12k_vif *arvif = (void *)vif->drv_priv;
+	int ret;
+	struct ath12k_wmi_peer_create_arg param;
+
+	mutex_lock(&ar->conf_mutex);
+
+	ath12k_dbg(ab, ATH12K_DBG_MAC,
+		   "mac chanctx assign ptr %pK vdev_id %i\n",
+		   ctx, arvif->vdev_id);
+
+	/* for some targets bss peer must be created before vdev_start */
+	if (ab->hw_params->vdev_start_delay &&
+	    arvif->vdev_type != WMI_VDEV_TYPE_AP &&
+	    arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+	    !ath12k_peer_exist_by_vdev_id(ab, arvif->vdev_id)) {
+		memcpy(&arvif->chanctx, ctx, sizeof(*ctx));
+		ret = 0;
+		goto out;
+	}
+
+	if (WARN_ON(arvif->is_started)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (ab->hw_params->vdev_start_delay &&
+	    (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+	     arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)) {
+		param.vdev_id = arvif->vdev_id;
+		param.peer_type = WMI_PEER_TYPE_DEFAULT;
+		param.peer_addr = ar->mac_addr;
+
+		ret = ath12k_peer_create(ar, arvif, NULL, &param);
+		if (ret) {
+			ath12k_warn(ab, "failed to create peer after vdev start delay: %d\n",
+				    ret);
+			goto out;
+		}
+	}
+
+	if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+		ret = ath12k_mac_monitor_start(ar);
+		if (ret)
+			goto out;
+		arvif->is_started = true;
+		goto out;
+	}
+
+	ret = ath12k_mac_vdev_start(arvif, &ctx->def);
+	if (ret) {
+		ath12k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
+			    arvif->vdev_id, vif->addr,
+			    ctx->def.chan->center_freq, ret);
+		goto out;
+	}
+
+	if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR && ar->monitor_vdev_created)
+		ath12k_mac_monitor_start(ar);
+
+	arvif->is_started = true;
+
+	/* TODO: Setup ps and cts/rts protection */
+
+out:
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+static void
+ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
+				   struct ieee80211_vif *vif,
+				   struct ieee80211_bss_conf *link_conf,
+				   struct ieee80211_chanctx_conf *ctx)
+{
+	struct ath12k *ar = hw->priv;
+	struct ath12k_base *ab = ar->ab;
+	struct ath12k_vif *arvif = (void *)vif->drv_priv;
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	ath12k_dbg(ab, ATH12K_DBG_MAC,
+		   "mac chanctx unassign ptr %pK vdev_id %i\n",
+		   ctx, arvif->vdev_id);
+
+	WARN_ON(!arvif->is_started);
+
+	if (ab->hw_params->vdev_start_delay &&
+	    arvif->vdev_type == WMI_VDEV_TYPE_MONITOR &&
+	    ath12k_peer_find_by_addr(ab, ar->mac_addr))
+		ath12k_peer_delete(ar, arvif->vdev_id, ar->mac_addr);
+
+	if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+		ret = ath12k_mac_monitor_stop(ar);
+		if (ret) {
+			mutex_unlock(&ar->conf_mutex);
+			return;
+		}
+
+		arvif->is_started = false;
+		mutex_unlock(&ar->conf_mutex);
+		return;
+	}
+
+	ret = ath12k_mac_vdev_stop(arvif);
+	if (ret)
+		ath12k_warn(ab, "failed to stop vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+
+	arvif->is_started = false;
+
+	if (ab->hw_params->vdev_start_delay &&
+	    arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
+		ath12k_wmi_vdev_down(ar, arvif->vdev_id);
+
+	if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+	    ar->num_started_vdevs == 1 && ar->monitor_vdev_created)
+		ath12k_mac_monitor_stop(ar);
+
+	mutex_unlock(&ar->conf_mutex);
+}
+
+static int
+ath12k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
+				 struct ieee80211_vif_chanctx_switch *vifs,
+				 int n_vifs,
+				 enum ieee80211_chanctx_switch_mode mode)
+{
+	struct ath12k *ar = hw->priv;
+
+	mutex_lock(&ar->conf_mutex);
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+		   "mac chanctx switch n_vifs %d mode %d\n",
+		   n_vifs, mode);
+	ath12k_mac_update_vif_chan(ar, vifs, n_vifs);
+
+	mutex_unlock(&ar->conf_mutex);
+
+	return 0;
+}
+
+static int
+ath12k_set_vdev_param_to_all_vifs(struct ath12k *ar, int param, u32 value)
+{
+	struct ath12k_vif *arvif;
+	int ret = 0;
+
+	mutex_lock(&ar->conf_mutex);
+	list_for_each_entry(arvif, &ar->arvifs, list) {
+		ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "setting mac vdev %d param %d value %d\n",
+			   arvif->vdev_id, param, value);
+
+		ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+						    param, value);
+		if (ret) {
+			ath12k_warn(ar->ab, "failed to set param %d for vdev %d: %d\n",
+				    param, arvif->vdev_id, ret);
+			break;
+		}
+	}
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+/* mac80211 stores a device-specific RTS/fragmentation threshold value, but
+ * the ath12k driver sets it in firmware per interface.
+ */
+static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+	struct ath12k *ar = hw->priv;
+	int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
+
+	return ath12k_set_vdev_param_to_all_vifs(ar, param_id, value);
+}
+
+static int ath12k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
+{
+	/* Even though there's a WMI vdev param for fragmentation threshold no
+	 * known firmware actually implements it. Moreover it is not possible to
+	 * leave frame fragmentation to mac80211 because firmware clears the
+	 * "more fragments" bit in frame control making it impossible for remote
+	 * devices to reassemble frames.
+	 *
+	 * Hence implement a dummy callback just to say fragmentation isn't
+	 * supported. This effectively prevents mac80211 from doing frame
+	 * fragmentation in software.
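+	 *
+	 * ath10k and ath11k take the same approach for the same reason.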
+ */ + return -EOPNOTSUPP; +} + +static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop) +{ + struct ath12k *ar = hw->priv; + long time_left; + + if (drop) + return; + + time_left = wait_event_timeout(ar->dp.tx_empty_waitq, + (atomic_read(&ar->dp.num_tx_pending) == 0), + ATH12K_FLUSH_TIMEOUT); + if (time_left == 0) + ath12k_warn(ar->ab, "failed to flush transmit queue %ld\n", time_left); +} + +static int +ath12k_mac_bitrate_mask_num_ht_rates(struct ath12k *ar, + enum nl80211_band band, + const struct cfg80211_bitrate_mask *mask) +{ + int num_rates = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) + num_rates += hweight16(mask->control[band].ht_mcs[i]); + + return num_rates; +} + +static bool +ath12k_mac_has_single_legacy_rate(struct ath12k *ar, + enum nl80211_band band, + const struct cfg80211_bitrate_mask *mask) +{ + int num_rates = 0; + + num_rates = hweight32(mask->control[band].legacy); + + if (ath12k_mac_bitrate_mask_num_ht_rates(ar, band, mask)) + return false; + + if (ath12k_mac_bitrate_mask_num_vht_rates(ar, band, mask)) + return false; + + return num_rates == 1; +} + +static bool +ath12k_mac_bitrate_mask_get_single_nss(struct ath12k *ar, + enum nl80211_band band, + const struct cfg80211_bitrate_mask *mask, + int *nss) +{ + struct ieee80211_supported_band *sband = &ar->mac.sbands[band]; + u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map); + u8 ht_nss_mask = 0; + u8 vht_nss_mask = 0; + int i; + + /* No need to consider legacy here. Basic rates are always present + * in bitrate mask + */ + + for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) { + if (mask->control[band].ht_mcs[i] == 0) + continue; + else if (mask->control[band].ht_mcs[i] == + sband->ht_cap.mcs.rx_mask[i]) + ht_nss_mask |= BIT(i); + else + return false; + } + + for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { + if (mask->control[band].vht_mcs[i] == 0) + continue; + else if (mask->control[band].vht_mcs[i] == + ath12k_mac_get_max_vht_mcs_map(vht_mcs_map, i)) + vht_nss_mask |= BIT(i); + else + return false; + } + + if (ht_nss_mask != vht_nss_mask) + return false; + + if (ht_nss_mask == 0) + return false; + + if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask) + return false; + + *nss = fls(ht_nss_mask); + + return true; +} + +static int +ath12k_mac_get_single_legacy_rate(struct ath12k *ar, + enum nl80211_band band, + const struct cfg80211_bitrate_mask *mask, + u32 *rate, u8 *nss) +{ + int rate_idx; + u16 bitrate; + u8 preamble; + u8 hw_rate; + + if (hweight32(mask->control[band].legacy) != 1) + return -EINVAL; + + rate_idx = ffs(mask->control[band].legacy) - 1; + + if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ) + rate_idx += ATH12K_MAC_FIRST_OFDM_RATE_IDX; + + hw_rate = ath12k_legacy_rates[rate_idx].hw_value; + bitrate = ath12k_legacy_rates[rate_idx].bitrate; + + if (ath12k_mac_bitrate_is_cck(bitrate)) + preamble = WMI_RATE_PREAMBLE_CCK; + else + preamble = WMI_RATE_PREAMBLE_OFDM; + + *nss = 1; + *rate = ATH12K_HW_RATE_CODE(hw_rate, 0, preamble); + + return 0; +} + +static int ath12k_mac_set_fixed_rate_params(struct ath12k_vif *arvif, + u32 rate, u8 nss, u8 sgi, u8 ldpc) +{ + struct ath12k *ar = arvif->ar; + u32 vdev_param; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02x nss %u sgi %u\n", + arvif->vdev_id, rate, nss, sgi); + + vdev_param = WMI_VDEV_PARAM_FIXED_RATE; + ret = 
ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + vdev_param, rate); + if (ret) { + ath12k_warn(ar->ab, "failed to set fixed rate param 0x%02x: %d\n", + rate, ret); + return ret; + } + + vdev_param = WMI_VDEV_PARAM_NSS; + ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + vdev_param, nss); + if (ret) { + ath12k_warn(ar->ab, "failed to set nss param %d: %d\n", + nss, ret); + return ret; + } + + vdev_param = WMI_VDEV_PARAM_SGI; + ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + vdev_param, sgi); + if (ret) { + ath12k_warn(ar->ab, "failed to set sgi param %d: %d\n", + sgi, ret); + return ret; + } + + vdev_param = WMI_VDEV_PARAM_LDPC; + ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + vdev_param, ldpc); + if (ret) { + ath12k_warn(ar->ab, "failed to set ldpc param %d: %d\n", + ldpc, ret); + return ret; + } + + return 0; +} + +static bool +ath12k_mac_vht_mcs_range_present(struct ath12k *ar, + enum nl80211_band band, + const struct cfg80211_bitrate_mask *mask) +{ + int i; + u16 vht_mcs; + + for (i = 0; i < NL80211_VHT_NSS_MAX; i++) { + vht_mcs = mask->control[band].vht_mcs[i]; + + switch (vht_mcs) { + case 0: + case BIT(8) - 1: + case BIT(9) - 1: + case BIT(10) - 1: + break; + default: + return false; + } + } + + return true; +} + +static void ath12k_mac_set_bitrate_mask_iter(void *data, + struct ieee80211_sta *sta) +{ + struct ath12k_vif *arvif = data; + struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv; + struct ath12k *ar = arvif->ar; + + spin_lock_bh(&ar->data_lock); + arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED; + spin_unlock_bh(&ar->data_lock); + + ieee80211_queue_work(ar->hw, &arsta->update_wk); +} + +static void ath12k_mac_disable_peer_fixed_rate(void *data, + struct ieee80211_sta *sta) +{ + struct ath12k_vif *arvif = data; + struct ath12k *ar = arvif->ar; + int ret; + + ret = ath12k_wmi_set_peer_param(ar, sta->addr, + arvif->vdev_id, + WMI_PEER_PARAM_FIXED_RATE, + WMI_FIXED_RATE_NONE); + if (ret) + ath12k_warn(ar->ab, + "failed to disable peer fixed rate for STA %pM ret %d\n", + sta->addr, ret); +} + +static int +ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const struct cfg80211_bitrate_mask *mask) +{ + struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct cfg80211_chan_def def; + struct ath12k *ar = arvif->ar; + enum nl80211_band band; + const u8 *ht_mcs_mask; + const u16 *vht_mcs_mask; + u32 rate; + u8 nss; + u8 sgi; + u8 ldpc; + int single_nss; + int ret; + int num_rates; + + if (ath12k_mac_vif_chan(vif, &def)) + return -EPERM; + + band = def.chan->band; + ht_mcs_mask = mask->control[band].ht_mcs; + vht_mcs_mask = mask->control[band].vht_mcs; + ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC); + + sgi = mask->control[band].gi; + if (sgi == NL80211_TXRATE_FORCE_LGI) + return -EINVAL; + + /* mac80211 doesn't support sending a fixed HT/VHT MCS alone; it + * requires passing at least one of the used basic rates along with + * them. Fixed rate setting across different preambles (legacy, HT, + * VHT) is not supported by the FW, so the FIXED_RATE vdev param is + * not suitable for setting single HT/VHT rates. + * A single basic rate passed from userspace can, however, be applied + * through the FIXED_RATE param.
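+ * For example, "iw dev wlan0 set bitrates legacy-5 6" (interface name + * illustrative) yields exactly one legacy rate and takes this + * FIXED_RATE path.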
+ */ + if (ath12k_mac_has_single_legacy_rate(ar, band, mask)) { + ret = ath12k_mac_get_single_legacy_rate(ar, band, mask, &rate, + &nss); + if (ret) { + ath12k_warn(ar->ab, "failed to get single legacy rate for vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + ieee80211_iterate_stations_atomic(ar->hw, + ath12k_mac_disable_peer_fixed_rate, + arvif); + } else if (ath12k_mac_bitrate_mask_get_single_nss(ar, band, mask, + &single_nss)) { + rate = WMI_FIXED_RATE_NONE; + nss = single_nss; + } else { + rate = WMI_FIXED_RATE_NONE; + nss = min_t(u32, ar->num_tx_chains, + max(ath12k_mac_max_ht_nss(ht_mcs_mask), + ath12k_mac_max_vht_nss(vht_mcs_mask))); + + /* If multiple rates across different preambles are given + * we can reconfigure this info with all peers using the PEER_ASSOC + * command, with the below exception cases. + * - Single VHT rate: the peer_assoc command accommodates only MCS + * range values, i.e. 0-7, 0-8, 0-9 for VHT. Though mac80211 + * mandates passing basic rates along with HT/VHT rates, FW + * doesn't allow switching from VHT to legacy. Hence instead of + * setting legacy and VHT rates using the RATEMASK_CMD vdev cmd, + * we could set this VHT rate as the peer fixed rate param, which + * will override the FIXED rate and the FW rate control algorithm. + * If a single VHT rate is passed along with HT rates, we select + * the VHT rate as the fixed rate for VHT peers. + * - Multiple VHT rates: when multiple VHT rates are given, this + * can be set using the RATEMASK CMD, which uses the FW rate-ctl + * algorithm. + * TODO: Setting multiple VHT MCS and replacing peer_assoc with + * RATEMASK_CMDID can cover all use cases of setting rates + * across multiple preambles and rates within the same type. + * But this requires more validation of the command at this point. + */ + + num_rates = ath12k_mac_bitrate_mask_num_vht_rates(ar, band, + mask); + + if (!ath12k_mac_vht_mcs_range_present(ar, band, mask) && + num_rates > 1) { + /* TODO: Handle multiple VHT MCS values setting using + * RATEMASK CMD + */ + ath12k_warn(ar->ab, + "Setting more than one MCS value in bitrate mask not supported\n"); + return -EINVAL; + } + + ieee80211_iterate_stations_atomic(ar->hw, + ath12k_mac_disable_peer_fixed_rate, + arvif); + + mutex_lock(&ar->conf_mutex); + + arvif->bitrate_mask = *mask; + ieee80211_iterate_stations_atomic(ar->hw, + ath12k_mac_set_bitrate_mask_iter, + arvif); + + mutex_unlock(&ar->conf_mutex); + } + + mutex_lock(&ar->conf_mutex); + + ret = ath12k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc); + if (ret) { + ath12k_warn(ar->ab, "failed to set fixed rate params on vdev %i: %d\n", + arvif->vdev_id, ret); + } + + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static void +ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw, + enum ieee80211_reconfig_type reconfig_type) +{ + struct ath12k *ar = hw->priv; + struct ath12k_base *ab = ar->ab; + int recovery_count; + + if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART) + return; + + mutex_lock(&ar->conf_mutex); + + if (ar->state == ATH12K_STATE_RESTARTED) { + ath12k_warn(ar->ab, "pdev %d successfully recovered\n", + ar->pdev->pdev_id); + ar->state = ATH12K_STATE_ON; + ieee80211_wake_queues(ar->hw); + + if (ab->is_reset) { + recovery_count = atomic_inc_return(&ab->recovery_count); + ath12k_dbg(ab, ATH12K_DBG_BOOT, "recovery count %d\n", + recovery_count); + /* When there are multiple radios in an SOC, + * the recovery has to be done for each radio + */ + if (recovery_count == ab->num_radios) { + atomic_dec(&ab->reset_count); + complete(&ab->reset_complete); + ab->is_reset =
false; + atomic_set(&ab->fail_cont_count, 0); + ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n"); + } + } + } + + mutex_unlock(&ar->conf_mutex); +} + +static void +ath12k_mac_update_bss_chan_survey(struct ath12k *ar, + struct ieee80211_channel *channel) +{ + int ret; + enum wmi_bss_chan_info_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ; + + lockdep_assert_held(&ar->conf_mutex); + + if (!test_bit(WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64, ar->ab->wmi_ab.svc_map) || + ar->rx_channel != channel) + return; + + if (ar->scan.state != ATH12K_SCAN_IDLE) { + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, + "ignoring bss chan info req while scanning..\n"); + return; + } + + reinit_completion(&ar->bss_survey_done); + + ret = ath12k_wmi_pdev_bss_chan_info_request(ar, type); + if (ret) { + ath12k_warn(ar->ab, "failed to send pdev bss chan info request\n"); + return; + } + + ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ); + if (ret == 0) + ath12k_warn(ar->ab, "bss channel survey timed out\n"); +} + +static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx, + struct survey_info *survey) +{ + struct ath12k *ar = hw->priv; + struct ieee80211_supported_band *sband; + struct survey_info *ar_survey; + int ret = 0; + + if (idx >= ATH12K_NUM_CHANS) + return -ENOENT; + + ar_survey = &ar->survey[idx]; + + mutex_lock(&ar->conf_mutex); + + sband = hw->wiphy->bands[NL80211_BAND_2GHZ]; + if (sband && idx >= sband->n_channels) { + idx -= sband->n_channels; + sband = NULL; + } + + if (!sband) + sband = hw->wiphy->bands[NL80211_BAND_5GHZ]; + + if (!sband || idx >= sband->n_channels) { + ret = -ENOENT; + goto exit; + } + + ath12k_mac_update_bss_chan_survey(ar, &sband->channels[idx]); + + spin_lock_bh(&ar->data_lock); + memcpy(survey, ar_survey, sizeof(*survey)); + spin_unlock_bh(&ar->data_lock); + + survey->channel = &sband->channels[idx]; + + if (ar->rx_channel == survey->channel) + survey->filled |= SURVEY_INFO_IN_USE; + +exit: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct station_info *sinfo) +{ + struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv; + + sinfo->rx_duration = arsta->rx_duration; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION); + + sinfo->tx_duration = arsta->tx_duration; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION); + + if (!arsta->txrate.legacy && !arsta->txrate.nss) + return; + + if (arsta->txrate.legacy) { + sinfo->txrate.legacy = arsta->txrate.legacy; + } else { + sinfo->txrate.mcs = arsta->txrate.mcs; + sinfo->txrate.nss = arsta->txrate.nss; + sinfo->txrate.bw = arsta->txrate.bw; + sinfo->txrate.he_gi = arsta->txrate.he_gi; + sinfo->txrate.he_dcm = arsta->txrate.he_dcm; + sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc; + } + sinfo->txrate.flags = arsta->txrate.flags; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); + + /* TODO: Use real NF instead of default one. 
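+ * The value reported here is arsta->rssi_comb plus the fixed + * ATH12K_DEFAULT_NOISE_FLOOR constant rather than the per-channel + * noise floor measured by firmware.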
*/ + sinfo->signal = arsta->rssi_comb + ATH12K_DEFAULT_NOISE_FLOOR; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); +} + +static const struct ieee80211_ops ath12k_ops = { + .tx = ath12k_mac_op_tx, + .wake_tx_queue = ieee80211_handle_wake_tx_queue, + .start = ath12k_mac_op_start, + .stop = ath12k_mac_op_stop, + .reconfig_complete = ath12k_mac_op_reconfig_complete, + .add_interface = ath12k_mac_op_add_interface, + .remove_interface = ath12k_mac_op_remove_interface, + .update_vif_offload = ath12k_mac_op_update_vif_offload, + .config = ath12k_mac_op_config, + .bss_info_changed = ath12k_mac_op_bss_info_changed, + .configure_filter = ath12k_mac_op_configure_filter, + .hw_scan = ath12k_mac_op_hw_scan, + .cancel_hw_scan = ath12k_mac_op_cancel_hw_scan, + .set_key = ath12k_mac_op_set_key, + .sta_state = ath12k_mac_op_sta_state, + .sta_set_txpwr = ath12k_mac_op_sta_set_txpwr, + .sta_rc_update = ath12k_mac_op_sta_rc_update, + .conf_tx = ath12k_mac_op_conf_tx, + .set_antenna = ath12k_mac_op_set_antenna, + .get_antenna = ath12k_mac_op_get_antenna, + .ampdu_action = ath12k_mac_op_ampdu_action, + .add_chanctx = ath12k_mac_op_add_chanctx, + .remove_chanctx = ath12k_mac_op_remove_chanctx, + .change_chanctx = ath12k_mac_op_change_chanctx, + .assign_vif_chanctx = ath12k_mac_op_assign_vif_chanctx, + .unassign_vif_chanctx = ath12k_mac_op_unassign_vif_chanctx, + .switch_vif_chanctx = ath12k_mac_op_switch_vif_chanctx, + .set_rts_threshold = ath12k_mac_op_set_rts_threshold, + .set_frag_threshold = ath12k_mac_op_set_frag_threshold, + .set_bitrate_mask = ath12k_mac_op_set_bitrate_mask, + .get_survey = ath12k_mac_op_get_survey, + .flush = ath12k_mac_op_flush, + .sta_statistics = ath12k_mac_op_sta_statistics, +}; + +static void ath12k_mac_update_ch_list(struct ath12k *ar, + struct ieee80211_supported_band *band, + u32 freq_low, u32 freq_high) +{ + int i; + + if (!(freq_low && freq_high)) + return; + + for (i = 0; i < band->n_channels; i++) { + if (band->channels[i].center_freq < freq_low || + band->channels[i].center_freq > freq_high) + band->channels[i].flags |= IEEE80211_CHAN_DISABLED; + } +} + +static u32 ath12k_get_phy_id(struct ath12k *ar, u32 band) +{ + struct ath12k_pdev *pdev = ar->pdev; + struct ath12k_pdev_cap *pdev_cap = &pdev->cap; + + if (band == WMI_HOST_WLAN_2G_CAP) + return pdev_cap->band[NL80211_BAND_2GHZ].phy_id; + + if (band == WMI_HOST_WLAN_5G_CAP) + return pdev_cap->band[NL80211_BAND_5GHZ].phy_id; + + ath12k_warn(ar->ab, "unsupported phy cap:%d\n", band); + + return 0; +} + +static int ath12k_mac_setup_channels_rates(struct ath12k *ar, + u32 supported_bands) +{ + struct ieee80211_supported_band *band; + struct ath12k_wmi_hal_reg_capabilities_ext_arg *reg_cap; + void *channels; + u32 phy_id; + + BUILD_BUG_ON((ARRAY_SIZE(ath12k_2ghz_channels) + + ARRAY_SIZE(ath12k_5ghz_channels) + + ARRAY_SIZE(ath12k_6ghz_channels)) != + ATH12K_NUM_CHANS); + + reg_cap = &ar->ab->hal_reg_cap[ar->pdev_idx]; + + if (supported_bands & WMI_HOST_WLAN_2G_CAP) { + channels = kmemdup(ath12k_2ghz_channels, + sizeof(ath12k_2ghz_channels), + GFP_KERNEL); + if (!channels) + return -ENOMEM; + + band = &ar->mac.sbands[NL80211_BAND_2GHZ]; + band->band = NL80211_BAND_2GHZ; + band->n_channels = ARRAY_SIZE(ath12k_2ghz_channels); + band->channels = channels; + band->n_bitrates = ath12k_g_rates_size; + band->bitrates = ath12k_g_rates; + ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band; + + if (ar->ab->hw_params->single_pdev_only) { + phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_2G_CAP); + reg_cap = &ar->ab->hal_reg_cap[phy_id]; + } + 
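/* Disable 2 GHz channels outside the regulatory range reported by + * firmware for this pdev. + */ +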
ath12k_mac_update_ch_list(ar, band, + reg_cap->low_2ghz_chan, + reg_cap->high_2ghz_chan); + } + + if (supported_bands & WMI_HOST_WLAN_5G_CAP) { + if (reg_cap->high_5ghz_chan >= ATH12K_MAX_6G_FREQ) { + channels = kmemdup(ath12k_6ghz_channels, + sizeof(ath12k_6ghz_channels), GFP_KERNEL); + if (!channels) { + kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); + return -ENOMEM; + } + + ar->supports_6ghz = true; + band = &ar->mac.sbands[NL80211_BAND_6GHZ]; + band->band = NL80211_BAND_6GHZ; + band->n_channels = ARRAY_SIZE(ath12k_6ghz_channels); + band->channels = channels; + band->n_bitrates = ath12k_a_rates_size; + band->bitrates = ath12k_a_rates; + ar->hw->wiphy->bands[NL80211_BAND_6GHZ] = band; + ath12k_mac_update_ch_list(ar, band, + reg_cap->low_5ghz_chan, + reg_cap->high_5ghz_chan); + } + + if (reg_cap->low_5ghz_chan < ATH12K_MIN_6G_FREQ) { + channels = kmemdup(ath12k_5ghz_channels, + sizeof(ath12k_5ghz_channels), + GFP_KERNEL); + if (!channels) { + kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); + kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels); + return -ENOMEM; + } + + band = &ar->mac.sbands[NL80211_BAND_5GHZ]; + band->band = NL80211_BAND_5GHZ; + band->n_channels = ARRAY_SIZE(ath12k_5ghz_channels); + band->channels = channels; + band->n_bitrates = ath12k_a_rates_size; + band->bitrates = ath12k_a_rates; + ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band; + + if (ar->ab->hw_params->single_pdev_only) { + phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP); + reg_cap = &ar->ab->hal_reg_cap[phy_id]; + } + + ath12k_mac_update_ch_list(ar, band, + reg_cap->low_5ghz_chan, + reg_cap->high_5ghz_chan); + } + } + + return 0; +} + +static int ath12k_mac_setup_iface_combinations(struct ath12k *ar) +{ + struct ath12k_base *ab = ar->ab; + struct ieee80211_iface_combination *combinations; + struct ieee80211_iface_limit *limits; + int n_limits, max_interfaces; + bool ap, mesh; + + ap = ab->hw_params->interface_modes & BIT(NL80211_IFTYPE_AP); + + mesh = IS_ENABLED(CONFIG_MAC80211_MESH) && + ab->hw_params->interface_modes & BIT(NL80211_IFTYPE_MESH_POINT); + + combinations = kzalloc(sizeof(*combinations), GFP_KERNEL); + if (!combinations) + return -ENOMEM; + + if (ap || mesh) { + n_limits = 2; + max_interfaces = 16; + } else { + n_limits = 1; + max_interfaces = 1; + } + + limits = kcalloc(n_limits, sizeof(*limits), GFP_KERNEL); + if (!limits) { + kfree(combinations); + return -ENOMEM; + } + + limits[0].max = 1; + limits[0].types |= BIT(NL80211_IFTYPE_STATION); + + if (ap) { + limits[1].max = max_interfaces; + limits[1].types |= BIT(NL80211_IFTYPE_AP); + } + + if (mesh) + limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT); + + combinations[0].limits = limits; + combinations[0].n_limits = n_limits; + combinations[0].max_interfaces = max_interfaces; + combinations[0].num_different_channels = 1; + combinations[0].beacon_int_infra_match = true; + combinations[0].beacon_int_min_gcd = 100; + combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | + BIT(NL80211_CHAN_WIDTH_40) | + BIT(NL80211_CHAN_WIDTH_80); + + ar->hw->wiphy->iface_combinations = combinations; + ar->hw->wiphy->n_iface_combinations = 1; + + return 0; +} + +static const u8 ath12k_if_types_ext_capa[] = { + [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, + [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, +}; + +static const u8 ath12k_if_types_ext_capa_sta[] = { + [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, + [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, + [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT, +}; + +static const u8 
ath12k_if_types_ext_capa_ap[] = { + [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, + [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, + [9] = WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT, +}; + +static const struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = { + { + .extended_capabilities = ath12k_if_types_ext_capa, + .extended_capabilities_mask = ath12k_if_types_ext_capa, + .extended_capabilities_len = sizeof(ath12k_if_types_ext_capa), + }, { + .iftype = NL80211_IFTYPE_STATION, + .extended_capabilities = ath12k_if_types_ext_capa_sta, + .extended_capabilities_mask = ath12k_if_types_ext_capa_sta, + .extended_capabilities_len = + sizeof(ath12k_if_types_ext_capa_sta), + }, { + .iftype = NL80211_IFTYPE_AP, + .extended_capabilities = ath12k_if_types_ext_capa_ap, + .extended_capabilities_mask = ath12k_if_types_ext_capa_ap, + .extended_capabilities_len = + sizeof(ath12k_if_types_ext_capa_ap), + }, +}; + +static void __ath12k_mac_unregister(struct ath12k *ar) +{ + cancel_work_sync(&ar->regd_update_work); + + ieee80211_unregister_hw(ar->hw); + + idr_for_each(&ar->txmgmt_idr, ath12k_mac_tx_mgmt_pending_free, ar); + idr_destroy(&ar->txmgmt_idr); + + kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); + kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels); + kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels); + + kfree(ar->hw->wiphy->iface_combinations[0].limits); + kfree(ar->hw->wiphy->iface_combinations); + + SET_IEEE80211_DEV(ar->hw, NULL); +} + +void ath12k_mac_unregister(struct ath12k_base *ab) +{ + struct ath12k *ar; + struct ath12k_pdev *pdev; + int i; + + for (i = 0; i < ab->num_radios; i++) { + pdev = &ab->pdevs[i]; + ar = pdev->ar; + if (!ar) + continue; + + __ath12k_mac_unregister(ar); + } +} + +static int __ath12k_mac_register(struct ath12k *ar) +{ + struct ath12k_base *ab = ar->ab; + struct ath12k_pdev_cap *cap = &ar->pdev->cap; + static const u32 cipher_suites[] = { + WLAN_CIPHER_SUITE_TKIP, + WLAN_CIPHER_SUITE_CCMP, + WLAN_CIPHER_SUITE_AES_CMAC, + WLAN_CIPHER_SUITE_BIP_CMAC_256, + WLAN_CIPHER_SUITE_BIP_GMAC_128, + WLAN_CIPHER_SUITE_BIP_GMAC_256, + WLAN_CIPHER_SUITE_GCMP, + WLAN_CIPHER_SUITE_GCMP_256, + WLAN_CIPHER_SUITE_CCMP_256, + }; + int ret; + u32 ht_cap = 0; + + ath12k_pdev_caps_update(ar); + + SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr); + + SET_IEEE80211_DEV(ar->hw, ab->dev); + + ret = ath12k_mac_setup_channels_rates(ar, + cap->supported_bands); + if (ret) + goto err; + + ath12k_mac_setup_ht_vht_cap(ar, cap, &ht_cap); + ath12k_mac_setup_he_cap(ar, cap); + + ret = ath12k_mac_setup_iface_combinations(ar); + if (ret) { + ath12k_err(ar->ab, "failed to setup interface combinations: %d\n", ret); + goto err_free_channels; + } + + ar->hw->wiphy->available_antennas_rx = cap->rx_chain_mask; + ar->hw->wiphy->available_antennas_tx = cap->tx_chain_mask; + + ar->hw->wiphy->interface_modes = ab->hw_params->interface_modes; + + ieee80211_hw_set(ar->hw, SIGNAL_DBM); + ieee80211_hw_set(ar->hw, SUPPORTS_PS); + ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS); + ieee80211_hw_set(ar->hw, MFP_CAPABLE); + ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS); + ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL); + ieee80211_hw_set(ar->hw, AP_LINK_PS); + ieee80211_hw_set(ar->hw, SPECTRUM_MGMT); + ieee80211_hw_set(ar->hw, CONNECTION_MONITOR); + ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK); + ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA); + ieee80211_hw_set(ar->hw, QUEUE_CONTROL); + ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG); + ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK); + + if (ht_cap & WMI_HT_CAP_ENABLED) { + ieee80211_hw_set(ar->hw, 
AMPDU_AGGREGATION); + ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW); + ieee80211_hw_set(ar->hw, SUPPORTS_REORDERING_BUFFER); + ieee80211_hw_set(ar->hw, SUPPORTS_AMSDU_IN_AMPDU); + ieee80211_hw_set(ar->hw, USES_RSS); + } + + ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS; + ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; + + /* TODO: Check if the HT capability advertised from firmware is different + * for each band for a dual band capable radio. It will be tricky to + * handle it when the HT capability differs for each band. + */ + if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS) + ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS; + + ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID; + ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN; + + ar->hw->max_listen_interval = ATH12K_MAX_HW_LISTEN_INTERVAL; + + ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; + ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; + ar->hw->wiphy->max_remain_on_channel_duration = 5000; + + ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; + ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | + NL80211_FEATURE_AP_SCAN; + + ar->max_num_stations = TARGET_NUM_STATIONS; + ar->max_num_peers = TARGET_NUM_PEERS_PDEV; + + ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations; + + ar->hw->queues = ATH12K_HW_MAX_QUEUES; + ar->hw->wiphy->tx_queue_len = ATH12K_QUEUE_LEN; + ar->hw->offchannel_tx_hw_queue = ATH12K_HW_MAX_QUEUES - 1; + ar->hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE; + + ar->hw->vif_data_size = sizeof(struct ath12k_vif); + ar->hw->sta_data_size = sizeof(struct ath12k_sta); + + wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_STA_TX_PWR); + + ar->hw->wiphy->cipher_suites = cipher_suites; + ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); + + ar->hw->wiphy->iftype_ext_capab = ath12k_iftypes_ext_capa; + ar->hw->wiphy->num_iftype_ext_capab = + ARRAY_SIZE(ath12k_iftypes_ext_capa); + + if (ar->supports_6ghz) { + wiphy_ext_feature_set(ar->hw->wiphy, + NL80211_EXT_FEATURE_FILS_DISCOVERY); + wiphy_ext_feature_set(ar->hw->wiphy, + NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP); + } + + ath12k_reg_init(ar); + + if (!test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) { + ar->hw->netdev_features = NETIF_F_HW_CSUM; + ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL); + ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT); + } + + ret = ieee80211_register_hw(ar->hw); + if (ret) { + ath12k_err(ar->ab, "ieee80211 registration failed: %d\n", ret); + goto err_free_if_combs; + } + + if (!ab->hw_params->supports_monitor) + /* There's a race between calling ieee80211_register_hw() + * and here where the monitor mode is enabled for a little + * while. But that time is so short that in practice it makes + * no difference in real life.
+ */ + ar->hw->wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR); + + /* Apply the regd received during initialization */ + ret = ath12k_regd_update(ar, true); + if (ret) { + ath12k_err(ar->ab, "ath12k regd update failed: %d\n", ret); + goto err_unregister_hw; + } + + return 0; + +err_unregister_hw: + ieee80211_unregister_hw(ar->hw); + +err_free_if_combs: + kfree(ar->hw->wiphy->iface_combinations[0].limits); + kfree(ar->hw->wiphy->iface_combinations); + +err_free_channels: + kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); + kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels); + kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels); + +err: + SET_IEEE80211_DEV(ar->hw, NULL); + return ret; +} + +int ath12k_mac_register(struct ath12k_base *ab) +{ + struct ath12k *ar; + struct ath12k_pdev *pdev; + int i; + int ret; + + if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) + return 0; + + for (i = 0; i < ab->num_radios; i++) { + pdev = &ab->pdevs[i]; + ar = pdev->ar; + if (ab->pdevs_macaddr_valid) { + ether_addr_copy(ar->mac_addr, pdev->mac_addr); + } else { + ether_addr_copy(ar->mac_addr, ab->mac_addr); + ar->mac_addr[4] += i; + } + + ret = __ath12k_mac_register(ar); + if (ret) + goto err_cleanup; + + idr_init(&ar->txmgmt_idr); + spin_lock_init(&ar->txmgmt_idr_lock); + } + + /* Initialize channel counters frequency value in hertz */ + ab->cc_freq_hz = 320000; + ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1; + + return 0; + +err_cleanup: + for (i = i - 1; i >= 0; i--) { + pdev = &ab->pdevs[i]; + ar = pdev->ar; + __ath12k_mac_unregister(ar); + } + + return ret; +} + +int ath12k_mac_allocate(struct ath12k_base *ab) +{ + struct ieee80211_hw *hw; + struct ath12k *ar; + struct ath12k_pdev *pdev; + int ret; + int i; + + if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) + return 0; + + for (i = 0; i < ab->num_radios; i++) { + pdev = &ab->pdevs[i]; + hw = ieee80211_alloc_hw(sizeof(struct ath12k), &ath12k_ops); + if (!hw) { + ath12k_warn(ab, "failed to allocate mac80211 hw device\n"); + ret = -ENOMEM; + goto err_free_mac; + } + + ar = hw->priv; + ar->hw = hw; + ar->ab = ab; + ar->pdev = pdev; + ar->pdev_idx = i; + ar->lmac_id = ath12k_hw_get_mac_from_pdev_id(ab->hw_params, i); + + ar->wmi = &ab->wmi_ab.wmi[i]; + /* FIXME: wmi[0] is already initialized during attach, + * Should we do this again? 
+ */ + ath12k_wmi_pdev_attach(ab, i); + + ar->cfg_tx_chainmask = pdev->cap.tx_chain_mask; + ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask; + ar->num_tx_chains = hweight32(pdev->cap.tx_chain_mask); + ar->num_rx_chains = hweight32(pdev->cap.rx_chain_mask); + + pdev->ar = ar; + spin_lock_init(&ar->data_lock); + INIT_LIST_HEAD(&ar->arvifs); + INIT_LIST_HEAD(&ar->ppdu_stats_info); + mutex_init(&ar->conf_mutex); + init_completion(&ar->vdev_setup_done); + init_completion(&ar->vdev_delete_done); + init_completion(&ar->peer_assoc_done); + init_completion(&ar->peer_delete_done); + init_completion(&ar->install_key_done); + init_completion(&ar->bss_survey_done); + init_completion(&ar->scan.started); + init_completion(&ar->scan.completed); + + INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work); + INIT_WORK(&ar->regd_update_work, ath12k_regd_update_work); + + INIT_WORK(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work); + skb_queue_head_init(&ar->wmi_mgmt_tx_queue); + clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags); + } + + return 0; + +err_free_mac: + ath12k_mac_destroy(ab); + + return ret; +} + +void ath12k_mac_destroy(struct ath12k_base *ab) +{ + struct ath12k *ar; + struct ath12k_pdev *pdev; + int i; + + for (i = 0; i < ab->num_radios; i++) { + pdev = &ab->pdevs[i]; + ar = pdev->ar; + if (!ar) + continue; + + ieee80211_free_hw(ar->hw); + pdev->ar = NULL; + } +} diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h new file mode 100644 index 000000000000..57f4295420bb --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/mac.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef ATH12K_MAC_H +#define ATH12K_MAC_H + +#include <net/mac80211.h> +#include <net/cfg80211.h> + +struct ath12k; +struct ath12k_base; + +struct ath12k_generic_iter { + struct ath12k *ar; + int ret; +}; + +/* number of failed packets (20 packets with 16 sw retries each) */ +#define ATH12K_KICKOUT_THRESHOLD (20 * 16) + +/* Use insanely high numbers to make sure that the firmware implementation + * won't start; we have the same functionality already in hostapd. Unit + * is seconds. + */ +#define ATH12K_KEEPALIVE_MIN_IDLE 3747 +#define ATH12K_KEEPALIVE_MAX_IDLE 3895 +#define ATH12K_KEEPALIVE_MAX_UNRESPONSIVE 3900 + +/* FIXME: should these be in ieee80211.h?
*/ +#define IEEE80211_VHT_MCS_SUPPORT_0_11_MASK GENMASK(23, 16) +#define IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11 BIT(24) + +#define ATH12K_CHAN_WIDTH_NUM 8 + +#define ATH12K_TX_POWER_MAX_VAL 70 +#define ATH12K_TX_POWER_MIN_VAL 0 + +enum ath12k_supported_bw { + ATH12K_BW_20 = 0, + ATH12K_BW_40 = 1, + ATH12K_BW_80 = 2, + ATH12K_BW_160 = 3, +}; + +extern const struct htt_rx_ring_tlv_filter ath12k_mac_mon_status_filter_default; + +void ath12k_mac_destroy(struct ath12k_base *ab); +void ath12k_mac_unregister(struct ath12k_base *ab); +int ath12k_mac_register(struct ath12k_base *ab); +int ath12k_mac_allocate(struct ath12k_base *ab); +int ath12k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx, + u16 *rate); +u8 ath12k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband, + u32 bitrate); +u8 ath12k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband, + u8 hw_rate, bool cck); + +void __ath12k_mac_scan_finish(struct ath12k *ar); +void ath12k_mac_scan_finish(struct ath12k *ar); + +struct ath12k_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id); +struct ath12k_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab, + u32 vdev_id); +struct ath12k *ath12k_mac_get_ar_by_vdev_id(struct ath12k_base *ab, u32 vdev_id); +struct ath12k *ath12k_mac_get_ar_by_pdev_id(struct ath12k_base *ab, u32 pdev_id); + +void ath12k_mac_drain_tx(struct ath12k *ar); +void ath12k_mac_peer_cleanup_all(struct ath12k *ar); +int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx); +enum rate_info_bw ath12k_mac_bw_to_mac80211_bw(enum ath12k_supported_bw bw); +enum ath12k_supported_bw ath12k_mac_mac80211_bw_to_ath12k_bw(enum rate_info_bw bw); +enum hal_encrypt_type ath12k_dp_tx_get_encrypt_type(u32 cipher); +#endif diff --git a/drivers/net/wireless/ath/ath12k/mhi.c b/drivers/net/wireless/ath/ath12k/mhi.c new file mode 100644 index 000000000000..42f1140baa4f --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/mhi.c @@ -0,0 +1,616 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include <linux/msi.h> +#include <linux/pci.h> + +#include "core.h" +#include "debug.h" +#include "mhi.h" +#include "pci.h" + +#define MHI_TIMEOUT_DEFAULT_MS 90000 + +static const struct mhi_channel_config ath12k_mhi_channels_qcn9274[] = { + { + .num = 0, + .name = "LOOPBACK", + .num_elements = 32, + .event_ring = 1, + .dir = DMA_TO_DEVICE, + .ee_mask = 0x4, + .pollcfg = 0, + .doorbell = MHI_DB_BRST_DISABLE, + .lpm_notify = false, + .offload_channel = false, + .doorbell_mode_switch = false, + .auto_queue = false, + }, + { + .num = 1, + .name = "LOOPBACK", + .num_elements = 32, + .event_ring = 1, + .dir = DMA_FROM_DEVICE, + .ee_mask = 0x4, + .pollcfg = 0, + .doorbell = MHI_DB_BRST_DISABLE, + .lpm_notify = false, + .offload_channel = false, + .doorbell_mode_switch = false, + .auto_queue = false, + }, + { + .num = 20, + .name = "IPCR", + .num_elements = 32, + .event_ring = 1, + .dir = DMA_TO_DEVICE, + .ee_mask = 0x4, + .pollcfg = 0, + .doorbell = MHI_DB_BRST_DISABLE, + .lpm_notify = false, + .offload_channel = false, + .doorbell_mode_switch = false, + .auto_queue = false, + }, + { + .num = 21, + .name = "IPCR", + .num_elements = 32, + .event_ring = 1, + .dir = DMA_FROM_DEVICE, + .ee_mask = 0x4, + .pollcfg = 0, + .doorbell = MHI_DB_BRST_DISABLE, + .lpm_notify = false, + .offload_channel = false, + .doorbell_mode_switch = false, + .auto_queue = true, + }, +}; + +static struct mhi_event_config ath12k_mhi_events_qcn9274[] = { + { + .num_elements = 32, + .irq_moderation_ms = 0, + .irq = 1, + .data_type = MHI_ER_CTRL, + .mode = MHI_DB_BRST_DISABLE, + .hardware_event = false, + .client_managed = false, + .offload_channel = false, + }, + { + .num_elements = 256, + .irq_moderation_ms = 1, + .irq = 2, + .mode = MHI_DB_BRST_DISABLE, + .priority = 1, + .hardware_event = false, + .client_managed = false, + .offload_channel = false, + }, +}; + +const struct mhi_controller_config ath12k_mhi_config_qcn9274 = { + .max_channels = 30, + .timeout_ms = 10000, + .use_bounce_buf = false, + .buf_len = 0, + .num_channels = ARRAY_SIZE(ath12k_mhi_channels_qcn9274), + .ch_cfg = ath12k_mhi_channels_qcn9274, + .num_events = ARRAY_SIZE(ath12k_mhi_events_qcn9274), + .event_cfg = ath12k_mhi_events_qcn9274, +}; + +static const struct mhi_channel_config ath12k_mhi_channels_wcn7850[] = { + { + .num = 0, + .name = "LOOPBACK", + .num_elements = 32, + .event_ring = 0, + .dir = DMA_TO_DEVICE, + .ee_mask = 0x4, + .pollcfg = 0, + .doorbell = MHI_DB_BRST_DISABLE, + .lpm_notify = false, + .offload_channel = false, + .doorbell_mode_switch = false, + .auto_queue = false, + }, + { + .num = 1, + .name = "LOOPBACK", + .num_elements = 32, + .event_ring = 0, + .dir = DMA_FROM_DEVICE, + .ee_mask = 0x4, + .pollcfg = 0, + .doorbell = MHI_DB_BRST_DISABLE, + .lpm_notify = false, + .offload_channel = false, + .doorbell_mode_switch = false, + .auto_queue = false, + }, + { + .num = 20, + .name = "IPCR", + .num_elements = 64, + .event_ring = 1, + .dir = DMA_TO_DEVICE, + .ee_mask = 0x4, + .pollcfg = 0, + .doorbell = MHI_DB_BRST_DISABLE, + .lpm_notify = false, + .offload_channel = false, + .doorbell_mode_switch = false, + .auto_queue = false, + }, + { + .num = 21, + .name = "IPCR", + .num_elements = 64, + .event_ring = 1, + .dir = DMA_FROM_DEVICE, + .ee_mask = 0x4, + .pollcfg = 0, + .doorbell = MHI_DB_BRST_DISABLE, + .lpm_notify = false, + .offload_channel = false, + .doorbell_mode_switch = false, + .auto_queue = true, + }, +}; + +static struct mhi_event_config ath12k_mhi_events_wcn7850[] = { + { + .num_elements = 32, + .irq_moderation_ms = 0, + 
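/* The first ring is the MHI control event ring (MHI_ER_CTRL below); + * its events are delivered without IRQ moderation. + */ +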
.irq = 1, + .mode = MHI_DB_BRST_DISABLE, + .data_type = MHI_ER_CTRL, + .hardware_event = false, + .client_managed = false, + .offload_channel = false, + }, + { + .num_elements = 256, + .irq_moderation_ms = 1, + .irq = 2, + .mode = MHI_DB_BRST_DISABLE, + .priority = 1, + .hardware_event = false, + .client_managed = false, + .offload_channel = false, + }, +}; + +const struct mhi_controller_config ath12k_mhi_config_wcn7850 = { + .max_channels = 128, + .timeout_ms = 2000, + .use_bounce_buf = false, + .buf_len = 0, + .num_channels = ARRAY_SIZE(ath12k_mhi_channels_wcn7850), + .ch_cfg = ath12k_mhi_channels_wcn7850, + .num_events = ARRAY_SIZE(ath12k_mhi_events_wcn7850), + .event_cfg = ath12k_mhi_events_wcn7850, +}; + +void ath12k_mhi_set_mhictrl_reset(struct ath12k_base *ab) +{ + u32 val; + + val = ath12k_pci_read32(ab, MHISTATUS); + + ath12k_dbg(ab, ATH12K_DBG_PCI, "MHISTATUS 0x%x\n", val); + + /* Observed on some targets that after SOC_GLOBAL_RESET, MHISTATUS + * has the SYSERR bit set, and thus MHICTRL_RESET needs to be set + * to clear SYSERR. + */ + ath12k_pci_write32(ab, MHICTRL, MHICTRL_RESET_MASK); + + mdelay(10); +} + +static void ath12k_mhi_reset_txvecdb(struct ath12k_base *ab) +{ + ath12k_pci_write32(ab, PCIE_TXVECDB, 0); +} + +static void ath12k_mhi_reset_txvecstatus(struct ath12k_base *ab) +{ + ath12k_pci_write32(ab, PCIE_TXVECSTATUS, 0); +} + +static void ath12k_mhi_reset_rxvecdb(struct ath12k_base *ab) +{ + ath12k_pci_write32(ab, PCIE_RXVECDB, 0); +} + +static void ath12k_mhi_reset_rxvecstatus(struct ath12k_base *ab) +{ + ath12k_pci_write32(ab, PCIE_RXVECSTATUS, 0); +} + +void ath12k_mhi_clear_vector(struct ath12k_base *ab) +{ + ath12k_mhi_reset_txvecdb(ab); + ath12k_mhi_reset_txvecstatus(ab); + ath12k_mhi_reset_rxvecdb(ab); + ath12k_mhi_reset_rxvecstatus(ab); +} + +static int ath12k_mhi_get_msi(struct ath12k_pci *ab_pci) +{ + struct ath12k_base *ab = ab_pci->ab; + u32 user_base_data, base_vector; + int ret, num_vectors, i; + int *irq; + + ret = ath12k_pci_get_user_msi_assignment(ab, + "MHI", &num_vectors, + &user_base_data, &base_vector); + if (ret) + return ret; + + ath12k_dbg(ab, ATH12K_DBG_PCI, "Number of assigned MSI for MHI is %d, base vector is %d\n", + num_vectors, base_vector); + + irq = kcalloc(num_vectors, sizeof(*irq), GFP_KERNEL); + if (!irq) + return -ENOMEM; + + for (i = 0; i < num_vectors; i++) + irq[i] = ath12k_pci_get_msi_irq(ab->dev, + base_vector + i); + + ab_pci->mhi_ctrl->irq = irq; + ab_pci->mhi_ctrl->nr_irqs = num_vectors; + + return 0; +} + +static int ath12k_mhi_op_runtime_get(struct mhi_controller *mhi_cntrl) +{ + return 0; +} + +static void ath12k_mhi_op_runtime_put(struct mhi_controller *mhi_cntrl) +{ +} + +static char *ath12k_mhi_op_callback_to_str(enum mhi_callback reason) +{ + switch (reason) { + case MHI_CB_IDLE: + return "MHI_CB_IDLE"; + case MHI_CB_PENDING_DATA: + return "MHI_CB_PENDING_DATA"; + case MHI_CB_LPM_ENTER: + return "MHI_CB_LPM_ENTER"; + case MHI_CB_LPM_EXIT: + return "MHI_CB_LPM_EXIT"; + case MHI_CB_EE_RDDM: + return "MHI_CB_EE_RDDM"; + case MHI_CB_EE_MISSION_MODE: + return "MHI_CB_EE_MISSION_MODE"; + case MHI_CB_SYS_ERROR: + return "MHI_CB_SYS_ERROR"; + case MHI_CB_FATAL_ERROR: + return "MHI_CB_FATAL_ERROR"; + case MHI_CB_BW_REQ: + return "MHI_CB_BW_REQ"; + default: + return "UNKNOWN"; + } +} + +static void ath12k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl, + enum mhi_callback cb) +{ + struct ath12k_base *ab = dev_get_drvdata(mhi_cntrl->cntrl_dev); + + ath12k_dbg(ab, ATH12K_DBG_BOOT, "mhi notify status reason %s\n", +
ath12k_mhi_op_callback_to_str(cb)); + + switch (cb) { + case MHI_CB_SYS_ERROR: + ath12k_warn(ab, "firmware crashed: MHI_CB_SYS_ERROR\n"); + break; + case MHI_CB_EE_RDDM: + if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags))) + queue_work(ab->workqueue_aux, &ab->reset_work); + break; + default: + break; + } +} + +static int ath12k_mhi_op_read_reg(struct mhi_controller *mhi_cntrl, + void __iomem *addr, + u32 *out) +{ + *out = readl(addr); + + return 0; +} + +static void ath12k_mhi_op_write_reg(struct mhi_controller *mhi_cntrl, + void __iomem *addr, + u32 val) +{ + writel(val, addr); +} + +int ath12k_mhi_register(struct ath12k_pci *ab_pci) +{ + struct ath12k_base *ab = ab_pci->ab; + struct mhi_controller *mhi_ctrl; + int ret; + + mhi_ctrl = mhi_alloc_controller(); + if (!mhi_ctrl) + return -ENOMEM; + + ath12k_core_create_firmware_path(ab, ATH12K_AMSS_FILE, + ab_pci->amss_path, + sizeof(ab_pci->amss_path)); + + ab_pci->mhi_ctrl = mhi_ctrl; + mhi_ctrl->cntrl_dev = ab->dev; + mhi_ctrl->fw_image = ab_pci->amss_path; + mhi_ctrl->regs = ab->mem; + mhi_ctrl->reg_len = ab->mem_len; + + ret = ath12k_mhi_get_msi(ab_pci); + if (ret) { + ath12k_err(ab, "failed to get msi for mhi\n"); + mhi_free_controller(mhi_ctrl); + return ret; + } + + mhi_ctrl->iova_start = 0; + mhi_ctrl->iova_stop = 0xffffffff; + mhi_ctrl->sbl_size = SZ_512K; + mhi_ctrl->seg_len = SZ_512K; + mhi_ctrl->fbc_download = true; + mhi_ctrl->runtime_get = ath12k_mhi_op_runtime_get; + mhi_ctrl->runtime_put = ath12k_mhi_op_runtime_put; + mhi_ctrl->status_cb = ath12k_mhi_op_status_cb; + mhi_ctrl->read_reg = ath12k_mhi_op_read_reg; + mhi_ctrl->write_reg = ath12k_mhi_op_write_reg; + + ret = mhi_register_controller(mhi_ctrl, ab->hw_params->mhi_config); + if (ret) { + ath12k_err(ab, "failed to register to mhi bus, err = %d\n", ret); + mhi_free_controller(mhi_ctrl); + return ret; + } + + return 0; +} + +void ath12k_mhi_unregister(struct ath12k_pci *ab_pci) +{ + struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl; + + mhi_unregister_controller(mhi_ctrl); + kfree(mhi_ctrl->irq); + mhi_free_controller(mhi_ctrl); + ab_pci->mhi_ctrl = NULL; +} + +static char *ath12k_mhi_state_to_str(enum ath12k_mhi_state mhi_state) +{ + switch (mhi_state) { + case ATH12K_MHI_INIT: + return "INIT"; + case ATH12K_MHI_DEINIT: + return "DEINIT"; + case ATH12K_MHI_POWER_ON: + return "POWER_ON"; + case ATH12K_MHI_POWER_OFF: + return "POWER_OFF"; + case ATH12K_MHI_FORCE_POWER_OFF: + return "FORCE_POWER_OFF"; + case ATH12K_MHI_SUSPEND: + return "SUSPEND"; + case ATH12K_MHI_RESUME: + return "RESUME"; + case ATH12K_MHI_TRIGGER_RDDM: + return "TRIGGER_RDDM"; + case ATH12K_MHI_RDDM_DONE: + return "RDDM_DONE"; + default: + return "UNKNOWN"; + } +}; + +static void ath12k_mhi_set_state_bit(struct ath12k_pci *ab_pci, + enum ath12k_mhi_state mhi_state) +{ + struct ath12k_base *ab = ab_pci->ab; + + switch (mhi_state) { + case ATH12K_MHI_INIT: + set_bit(ATH12K_MHI_INIT, &ab_pci->mhi_state); + break; + case ATH12K_MHI_DEINIT: + clear_bit(ATH12K_MHI_INIT, &ab_pci->mhi_state); + break; + case ATH12K_MHI_POWER_ON: + set_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state); + break; + case ATH12K_MHI_POWER_OFF: + case ATH12K_MHI_FORCE_POWER_OFF: + clear_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state); + clear_bit(ATH12K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state); + clear_bit(ATH12K_MHI_RDDM_DONE, &ab_pci->mhi_state); + break; + case ATH12K_MHI_SUSPEND: + set_bit(ATH12K_MHI_SUSPEND, &ab_pci->mhi_state); + break; + case ATH12K_MHI_RESUME: + clear_bit(ATH12K_MHI_SUSPEND, &ab_pci->mhi_state); + break; + case 
ATH12K_MHI_TRIGGER_RDDM: + set_bit(ATH12K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state); + break; + case ATH12K_MHI_RDDM_DONE: + set_bit(ATH12K_MHI_RDDM_DONE, &ab_pci->mhi_state); + break; + default: + ath12k_err(ab, "unhandled mhi state (%d)\n", mhi_state); + } +} + +static int ath12k_mhi_check_state_bit(struct ath12k_pci *ab_pci, + enum ath12k_mhi_state mhi_state) +{ + struct ath12k_base *ab = ab_pci->ab; + + switch (mhi_state) { + case ATH12K_MHI_INIT: + if (!test_bit(ATH12K_MHI_INIT, &ab_pci->mhi_state)) + return 0; + break; + case ATH12K_MHI_DEINIT: + case ATH12K_MHI_POWER_ON: + if (test_bit(ATH12K_MHI_INIT, &ab_pci->mhi_state) && + !test_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state)) + return 0; + break; + case ATH12K_MHI_FORCE_POWER_OFF: + if (test_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state)) + return 0; + break; + case ATH12K_MHI_POWER_OFF: + case ATH12K_MHI_SUSPEND: + if (test_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state) && + !test_bit(ATH12K_MHI_SUSPEND, &ab_pci->mhi_state)) + return 0; + break; + case ATH12K_MHI_RESUME: + if (test_bit(ATH12K_MHI_SUSPEND, &ab_pci->mhi_state)) + return 0; + break; + case ATH12K_MHI_TRIGGER_RDDM: + if (test_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state) && + !test_bit(ATH12K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state)) + return 0; + break; + case ATH12K_MHI_RDDM_DONE: + return 0; + default: + ath12k_err(ab, "unhandled mhi state: %s(%d)\n", + ath12k_mhi_state_to_str(mhi_state), mhi_state); + } + + ath12k_err(ab, "failed to set mhi state %s(%d) in current mhi state (0x%lx)\n", + ath12k_mhi_state_to_str(mhi_state), mhi_state, + ab_pci->mhi_state); + + return -EINVAL; +} + +static int ath12k_mhi_set_state(struct ath12k_pci *ab_pci, + enum ath12k_mhi_state mhi_state) +{ + struct ath12k_base *ab = ab_pci->ab; + int ret; + + ret = ath12k_mhi_check_state_bit(ab_pci, mhi_state); + if (ret) + goto out; + + ath12k_dbg(ab, ATH12K_DBG_PCI, "setting mhi state: %s(%d)\n", + ath12k_mhi_state_to_str(mhi_state), mhi_state); + + switch (mhi_state) { + case ATH12K_MHI_INIT: + ret = mhi_prepare_for_power_up(ab_pci->mhi_ctrl); + break; + case ATH12K_MHI_DEINIT: + mhi_unprepare_after_power_down(ab_pci->mhi_ctrl); + ret = 0; + break; + case ATH12K_MHI_POWER_ON: + ret = mhi_async_power_up(ab_pci->mhi_ctrl); + break; + case ATH12K_MHI_POWER_OFF: + mhi_power_down(ab_pci->mhi_ctrl, true); + ret = 0; + break; + case ATH12K_MHI_FORCE_POWER_OFF: + mhi_power_down(ab_pci->mhi_ctrl, false); + ret = 0; + break; + case ATH12K_MHI_SUSPEND: + ret = mhi_pm_suspend(ab_pci->mhi_ctrl); + break; + case ATH12K_MHI_RESUME: + ret = mhi_pm_resume(ab_pci->mhi_ctrl); + break; + case ATH12K_MHI_TRIGGER_RDDM: + ret = mhi_force_rddm_mode(ab_pci->mhi_ctrl); + break; + case ATH12K_MHI_RDDM_DONE: + break; + default: + ath12k_err(ab, "unhandled MHI state (%d)\n", mhi_state); + ret = -EINVAL; + } + + if (ret) + goto out; + + ath12k_mhi_set_state_bit(ab_pci, mhi_state); + + return 0; + +out: + ath12k_err(ab, "failed to set mhi state: %s(%d)\n", + ath12k_mhi_state_to_str(mhi_state), mhi_state); + return ret; +} + +int ath12k_mhi_start(struct ath12k_pci *ab_pci) +{ + int ret; + + ab_pci->mhi_ctrl->timeout_ms = MHI_TIMEOUT_DEFAULT_MS; + + ret = ath12k_mhi_set_state(ab_pci, ATH12K_MHI_INIT); + if (ret) + goto out; + + ret = ath12k_mhi_set_state(ab_pci, ATH12K_MHI_POWER_ON); + if (ret) + goto out; + + return 0; + +out: + return ret; +} + +void ath12k_mhi_stop(struct ath12k_pci *ab_pci) +{ + ath12k_mhi_set_state(ab_pci, ATH12K_MHI_POWER_OFF); + ath12k_mhi_set_state(ab_pci, ATH12K_MHI_DEINIT); +} + +void 
ath12k_mhi_suspend(struct ath12k_pci *ab_pci) +{ + ath12k_mhi_set_state(ab_pci, ATH12K_MHI_SUSPEND); +} + +void ath12k_mhi_resume(struct ath12k_pci *ab_pci) +{ + ath12k_mhi_set_state(ab_pci, ATH12K_MHI_RESUME); +} diff --git a/drivers/net/wireless/ath/ath12k/mhi.h b/drivers/net/wireless/ath/ath12k/mhi.h new file mode 100644 index 000000000000..ebc23640ce7a --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/mhi.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ +#ifndef _ATH12K_MHI_H +#define _ATH12K_MHI_H + +#include "pci.h" + +#define PCIE_TXVECDB 0x360 +#define PCIE_TXVECSTATUS 0x368 +#define PCIE_RXVECDB 0x394 +#define PCIE_RXVECSTATUS 0x39C + +#define MHISTATUS 0x48 +#define MHICTRL 0x38 +#define MHICTRL_RESET_MASK 0x2 + +enum ath12k_mhi_state { + ATH12K_MHI_INIT, + ATH12K_MHI_DEINIT, + ATH12K_MHI_POWER_ON, + ATH12K_MHI_POWER_OFF, + ATH12K_MHI_FORCE_POWER_OFF, + ATH12K_MHI_SUSPEND, + ATH12K_MHI_RESUME, + ATH12K_MHI_TRIGGER_RDDM, + ATH12K_MHI_RDDM, + ATH12K_MHI_RDDM_DONE, +}; + +extern const struct mhi_controller_config ath12k_mhi_config_qcn9274; +extern const struct mhi_controller_config ath12k_mhi_config_wcn7850; + +int ath12k_mhi_start(struct ath12k_pci *ar_pci); +void ath12k_mhi_stop(struct ath12k_pci *ar_pci); +int ath12k_mhi_register(struct ath12k_pci *ar_pci); +void ath12k_mhi_unregister(struct ath12k_pci *ar_pci); +void ath12k_mhi_set_mhictrl_reset(struct ath12k_base *ab); +void ath12k_mhi_clear_vector(struct ath12k_base *ab); + +void ath12k_mhi_suspend(struct ath12k_pci *ar_pci); +void ath12k_mhi_resume(struct ath12k_pci *ar_pci); + +#endif diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c new file mode 100644 index 000000000000..ae7f6083c9fc --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/pci.c @@ -0,0 +1,1374 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include <linux/module.h> +#include <linux/msi.h> +#include <linux/pci.h> + +#include "pci.h" +#include "core.h" +#include "hif.h" +#include "mhi.h" +#include "debug.h" + +#define ATH12K_PCI_BAR_NUM 0 +#define ATH12K_PCI_DMA_MASK 32 + +#define ATH12K_PCI_IRQ_CE0_OFFSET 3 + +#define WINDOW_ENABLE_BIT 0x40000000 +#define WINDOW_REG_ADDRESS 0x310c +#define WINDOW_VALUE_MASK GENMASK(24, 19) +#define WINDOW_START 0x80000 +#define WINDOW_RANGE_MASK GENMASK(18, 0) +#define WINDOW_STATIC_MASK GENMASK(31, 6) + +#define TCSR_SOC_HW_VERSION 0x1B00000 +#define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(11, 8) +#define TCSR_SOC_HW_VERSION_MINOR_MASK GENMASK(7, 4) + +/* BAR0 + 4k is always accessible, and no + * need to force wakeup. 
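+ * Accesses beyond this always-on window are expected to require a + * force wake first; ACCESS_ALWAYS_OFF sits 32 bytes below the end of + * the 4K region: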
+ * 4K - 32 = 0xFE0 + */ +#define ACCESS_ALWAYS_OFF 0xFE0 + +#define QCN9274_DEVICE_ID 0x1109 +#define WCN7850_DEVICE_ID 0x1107 + +static const struct pci_device_id ath12k_pci_id_table[] = { + { PCI_VDEVICE(QCOM, QCN9274_DEVICE_ID) }, + { PCI_VDEVICE(QCOM, WCN7850_DEVICE_ID) }, + {0} +}; + +MODULE_DEVICE_TABLE(pci, ath12k_pci_id_table); + +/* TODO: revisit IRQ mapping for new SRNG's */ +static const struct ath12k_msi_config ath12k_msi_config[] = { + { + .total_vectors = 16, + .total_users = 3, + .users = (struct ath12k_msi_user[]) { + { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, + { .name = "CE", .num_vectors = 5, .base_vector = 3 }, + { .name = "DP", .num_vectors = 8, .base_vector = 8 }, + }, + }, +}; + +static const char *irq_name[ATH12K_IRQ_NUM_MAX] = { + "bhi", + "mhi-er0", + "mhi-er1", + "ce0", + "ce1", + "ce2", + "ce3", + "ce4", + "ce5", + "ce6", + "ce7", + "ce8", + "ce9", + "ce10", + "ce11", + "ce12", + "ce13", + "ce14", + "ce15", + "host2wbm-desc-feed", + "host2reo-re-injection", + "host2reo-command", + "host2rxdma-monitor-ring3", + "host2rxdma-monitor-ring2", + "host2rxdma-monitor-ring1", + "reo2ost-exception", + "wbm2host-rx-release", + "reo2host-status", + "reo2host-destination-ring4", + "reo2host-destination-ring3", + "reo2host-destination-ring2", + "reo2host-destination-ring1", + "rxdma2host-monitor-destination-mac3", + "rxdma2host-monitor-destination-mac2", + "rxdma2host-monitor-destination-mac1", + "ppdu-end-interrupts-mac3", + "ppdu-end-interrupts-mac2", + "ppdu-end-interrupts-mac1", + "rxdma2host-monitor-status-ring-mac3", + "rxdma2host-monitor-status-ring-mac2", + "rxdma2host-monitor-status-ring-mac1", + "host2rxdma-host-buf-ring-mac3", + "host2rxdma-host-buf-ring-mac2", + "host2rxdma-host-buf-ring-mac1", + "rxdma2host-destination-ring-mac3", + "rxdma2host-destination-ring-mac2", + "rxdma2host-destination-ring-mac1", + "host2tcl-input-ring4", + "host2tcl-input-ring3", + "host2tcl-input-ring2", + "host2tcl-input-ring1", + "wbm2host-tx-completions-ring4", + "wbm2host-tx-completions-ring3", + "wbm2host-tx-completions-ring2", + "wbm2host-tx-completions-ring1", + "tcl2host-status-ring", +}; + +static void ath12k_pci_select_window(struct ath12k_pci *ab_pci, u32 offset) +{ + struct ath12k_base *ab = ab_pci->ab; + + u32 window = u32_get_bits(offset, WINDOW_VALUE_MASK); + u32 static_window; + + lockdep_assert_held(&ab_pci->window_lock); + + /* Preserve the static window configuration and reset only dynamic window */ + static_window = ab_pci->register_window & WINDOW_STATIC_MASK; + window |= static_window; + + if (window != ab_pci->register_window) { + iowrite32(WINDOW_ENABLE_BIT | window, + ab->mem + WINDOW_REG_ADDRESS); + ioread32(ab->mem + WINDOW_REG_ADDRESS); + ab_pci->register_window = window; + } +} + +static void ath12k_pci_select_static_window(struct ath12k_pci *ab_pci) +{ + u32 umac_window = u32_get_bits(HAL_SEQ_WCSS_UMAC_OFFSET, WINDOW_VALUE_MASK); + u32 ce_window = u32_get_bits(HAL_CE_WFSS_CE_REG_BASE, WINDOW_VALUE_MASK); + u32 window; + + window = (umac_window << 12) | (ce_window << 6); + + spin_lock_bh(&ab_pci->window_lock); + ab_pci->register_window = window; + spin_unlock_bh(&ab_pci->window_lock); + + iowrite32(WINDOW_ENABLE_BIT | window, ab_pci->ab->mem + WINDOW_REG_ADDRESS); +} + +static u32 ath12k_pci_get_window_start(struct ath12k_base *ab, + u32 offset) +{ + u32 window_start; + + /* If offset lies within DP register range, use 3rd window */ + if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < WINDOW_RANGE_MASK) + window_start = 3 * WINDOW_START; + /* If offset 
lies within CE register range, use 2nd window */ + else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < WINDOW_RANGE_MASK) + window_start = 2 * WINDOW_START; + /* If offset lies within the PCI_BAR_WINDOW0_BASE range but not within + * the PCI_SOC_PCI_REG_BASE range, use 0th window + */ + else if (((offset ^ PCI_BAR_WINDOW0_BASE) < WINDOW_RANGE_MASK) && + !((offset ^ PCI_SOC_PCI_REG_BASE) < PCI_SOC_RANGE_MASK)) + window_start = 0; + else + window_start = WINDOW_START; + + return window_start; +} + +static void ath12k_pci_soc_global_reset(struct ath12k_base *ab) +{ + u32 val, delay; + + val = ath12k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET); + + val |= PCIE_SOC_GLOBAL_RESET_V; + + ath12k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val); + + /* TODO: exact time to sleep is uncertain */ + delay = 10; + mdelay(delay); + + /* Need to toggle the V bit back, otherwise the SOC stays stuck in reset */ + val &= ~PCIE_SOC_GLOBAL_RESET_V; + + ath12k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val); + + mdelay(delay); + + val = ath12k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET); + if (val == 0xffffffff) + ath12k_warn(ab, "link down error during global reset\n"); +} + +static void ath12k_pci_clear_dbg_registers(struct ath12k_base *ab) +{ + u32 val; + + /* read cookie */ + val = ath12k_pci_read32(ab, PCIE_Q6_COOKIE_ADDR); + ath12k_dbg(ab, ATH12K_DBG_PCI, "cookie:0x%x\n", val); + + val = ath12k_pci_read32(ab, WLAON_WARM_SW_ENTRY); + ath12k_dbg(ab, ATH12K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val); + + /* TODO: exact time to sleep is uncertain */ + mdelay(10); + + /* write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from + * continuing the warm boot path and entering a dead loop. + */ + ath12k_pci_write32(ab, WLAON_WARM_SW_ENTRY, 0); + mdelay(10); + + val = ath12k_pci_read32(ab, WLAON_WARM_SW_ENTRY); + ath12k_dbg(ab, ATH12K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val); + + /* A read-clear register; clear it to prevent Q6 from + * entering a wrong code path. + */ + val = ath12k_pci_read32(ab, WLAON_SOC_RESET_CAUSE_REG); + ath12k_dbg(ab, ATH12K_DBG_PCI, "soc reset cause:%d\n", val); +} + +static void ath12k_pci_enable_ltssm(struct ath12k_base *ab) +{ + u32 val; + int i; + + val = ath12k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM); + + /* The PCIE link seems very unstable after the hot reset */ + for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) { + if (val == 0xffffffff) + mdelay(5); + + ath12k_pci_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE); + val = ath12k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM); + } + + ath12k_dbg(ab, ATH12K_DBG_PCI, "pci ltssm 0x%x\n", val); + + val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST); + val |= GCC_GCC_PCIE_HOT_RST_VAL; + ath12k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST, val); + val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST); + + ath12k_dbg(ab, ATH12K_DBG_PCI, "pci pcie_hot_rst 0x%x\n", val); + + mdelay(5); +} + +static void ath12k_pci_clear_all_intrs(struct ath12k_base *ab) +{ + /* This is a WAR for a PCIE hot reset: the hot reset leaves an + * interrupt latched in the target, so when SBL is downloaded again + * it unmasks interrupts, services the stale one and crashes + * immediately.
+ */ + ath12k_pci_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL); +} + +static void ath12k_pci_set_wlaon_pwr_ctrl(struct ath12k_base *ab) +{ + u32 val; + + val = ath12k_pci_read32(ab, WLAON_QFPROM_PWR_CTRL_REG); + val &= ~QFPROM_PWR_CTRL_VDD4BLOW_MASK; + ath12k_pci_write32(ab, WLAON_QFPROM_PWR_CTRL_REG, val); +} + +static void ath12k_pci_force_wake(struct ath12k_base *ab) +{ + ath12k_pci_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1); + mdelay(5); +} + +static void ath12k_pci_sw_reset(struct ath12k_base *ab, bool power_on) +{ + if (power_on) { + ath12k_pci_enable_ltssm(ab); + ath12k_pci_clear_all_intrs(ab); + ath12k_pci_set_wlaon_pwr_ctrl(ab); + } + + ath12k_mhi_clear_vector(ab); + ath12k_pci_clear_dbg_registers(ab); + ath12k_pci_soc_global_reset(ab); + ath12k_mhi_set_mhictrl_reset(ab); +} + +static void ath12k_pci_free_ext_irq(struct ath12k_base *ab) +{ + int i, j; + + for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { + struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; + + for (j = 0; j < irq_grp->num_irq; j++) + free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp); + + netif_napi_del(&irq_grp->napi); + } +} + +static void ath12k_pci_free_irq(struct ath12k_base *ab) +{ + int i, irq_idx; + + for (i = 0; i < ab->hw_params->ce_count; i++) { + if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; + irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i; + free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]); + } + + ath12k_pci_free_ext_irq(ab); +} + +static void ath12k_pci_ce_irq_enable(struct ath12k_base *ab, u16 ce_id) +{ + u32 irq_idx; + + irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id; + enable_irq(ab->irq_num[irq_idx]); +} + +static void ath12k_pci_ce_irq_disable(struct ath12k_base *ab, u16 ce_id) +{ + u32 irq_idx; + + irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id; + disable_irq_nosync(ab->irq_num[irq_idx]); +} + +static void ath12k_pci_ce_irqs_disable(struct ath12k_base *ab) +{ + int i; + + for (i = 0; i < ab->hw_params->ce_count; i++) { + if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; + ath12k_pci_ce_irq_disable(ab, i); + } +} + +static void ath12k_pci_sync_ce_irqs(struct ath12k_base *ab) +{ + int i; + int irq_idx; + + for (i = 0; i < ab->hw_params->ce_count; i++) { + if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; + + irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i; + synchronize_irq(ab->irq_num[irq_idx]); + } +} + +static void ath12k_pci_ce_tasklet(struct tasklet_struct *t) +{ + struct ath12k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq); + + ath12k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num); + + ath12k_pci_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num); +} + +static irqreturn_t ath12k_pci_ce_interrupt_handler(int irq, void *arg) +{ + struct ath12k_ce_pipe *ce_pipe = arg; + + /* last interrupt received for this CE */ + ce_pipe->timestamp = jiffies; + + ath12k_pci_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num); + tasklet_schedule(&ce_pipe->intr_tq); + + return IRQ_HANDLED; +} + +static void ath12k_pci_ext_grp_disable(struct ath12k_ext_irq_grp *irq_grp) +{ + int i; + + for (i = 0; i < irq_grp->num_irq; i++) + disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]); +} + +static void __ath12k_pci_ext_irq_disable(struct ath12k_base *sc) +{ + int i; + + for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { + struct ath12k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i]; + + ath12k_pci_ext_grp_disable(irq_grp); + + napi_synchronize(&irq_grp->napi); + napi_disable(&irq_grp->napi); + } +} + +static void ath12k_pci_ext_grp_enable(struct 
ath12k_ext_irq_grp *irq_grp) +{ + int i; + + for (i = 0; i < irq_grp->num_irq; i++) + enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]); +} + +static void ath12k_pci_sync_ext_irqs(struct ath12k_base *ab) +{ + int i, j, irq_idx; + + for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { + struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; + + for (j = 0; j < irq_grp->num_irq; j++) { + irq_idx = irq_grp->irqs[j]; + synchronize_irq(ab->irq_num[irq_idx]); + } + } +} + +static int ath12k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget) +{ + struct ath12k_ext_irq_grp *irq_grp = container_of(napi, + struct ath12k_ext_irq_grp, + napi); + struct ath12k_base *ab = irq_grp->ab; + int work_done; + + work_done = ath12k_dp_service_srng(ab, irq_grp, budget); + if (work_done < budget) { + napi_complete_done(napi, work_done); + ath12k_pci_ext_grp_enable(irq_grp); + } + + if (work_done > budget) + work_done = budget; + + return work_done; +} + +static irqreturn_t ath12k_pci_ext_interrupt_handler(int irq, void *arg) +{ + struct ath12k_ext_irq_grp *irq_grp = arg; + + ath12k_dbg(irq_grp->ab, ATH12K_DBG_PCI, "ext irq:%d\n", irq); + + /* last interrupt received for this group */ + irq_grp->timestamp = jiffies; + + ath12k_pci_ext_grp_disable(irq_grp); + + napi_schedule(&irq_grp->napi); + + return IRQ_HANDLED; +} + +static int ath12k_pci_ext_irq_config(struct ath12k_base *ab) +{ + int i, j, ret, num_vectors = 0; + u32 user_base_data = 0, base_vector = 0, base_idx; + + base_idx = ATH12K_PCI_IRQ_CE0_OFFSET + CE_COUNT_MAX; + ret = ath12k_pci_get_user_msi_assignment(ab, "DP", + &num_vectors, + &user_base_data, + &base_vector); + if (ret < 0) + return ret; + + for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { + struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; + u32 num_irq = 0; + + irq_grp->ab = ab; + irq_grp->grp_id = i; + init_dummy_netdev(&irq_grp->napi_ndev); + netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi, + ath12k_pci_ext_grp_napi_poll); + + if (ab->hw_params->ring_mask->tx[i] || + ab->hw_params->ring_mask->rx[i] || + ab->hw_params->ring_mask->rx_err[i] || + ab->hw_params->ring_mask->rx_wbm_rel[i] || + ab->hw_params->ring_mask->reo_status[i] || + ab->hw_params->ring_mask->host2rxdma[i] || + ab->hw_params->ring_mask->rx_mon_dest[i]) { + num_irq = 1; + } + + irq_grp->num_irq = num_irq; + irq_grp->irqs[0] = base_idx + i; + + for (j = 0; j < irq_grp->num_irq; j++) { + int irq_idx = irq_grp->irqs[j]; + int vector = (i % num_vectors) + base_vector; + int irq = ath12k_pci_get_msi_irq(ab->dev, vector); + + ab->irq_num[irq_idx] = irq; + + ath12k_dbg(ab, ATH12K_DBG_PCI, + "irq:%d group:%d\n", irq, i); + + irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY); + ret = request_irq(irq, ath12k_pci_ext_interrupt_handler, + IRQF_SHARED, + "DP_EXT_IRQ", irq_grp); + if (ret) { + ath12k_err(ab, "failed to request irq %d: %d\n", + vector, ret); + return ret; + } + + disable_irq_nosync(ab->irq_num[irq_idx]); + } + } + + return 0; +} + +static int ath12k_pci_config_irq(struct ath12k_base *ab) +{ + struct ath12k_ce_pipe *ce_pipe; + u32 msi_data_start; + u32 msi_data_count, msi_data_idx; + u32 msi_irq_start; + unsigned int msi_data; + int irq, i, ret, irq_idx; + + ret = ath12k_pci_get_user_msi_assignment(ab, + "CE", &msi_data_count, + &msi_data_start, &msi_irq_start); + if (ret) + return ret; + + /* Configure CE irqs */ + + for (i = 0, msi_data_idx = 0; i < ab->hw_params->ce_count; i++) { + if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; + + msi_data = (msi_data_idx % msi_data_count) +
msi_irq_start; + irq = ath12k_pci_get_msi_irq(ab->dev, msi_data); + ce_pipe = &ab->ce.ce_pipe[i]; + + irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i; + + tasklet_setup(&ce_pipe->intr_tq, ath12k_pci_ce_tasklet); + + ret = request_irq(irq, ath12k_pci_ce_interrupt_handler, + IRQF_SHARED, irq_name[irq_idx], + ce_pipe); + if (ret) { + ath12k_err(ab, "failed to request irq %d: %d\n", + irq_idx, ret); + return ret; + } + + ab->irq_num[irq_idx] = irq; + msi_data_idx++; + + ath12k_pci_ce_irq_disable(ab, i); + } + + ret = ath12k_pci_ext_irq_config(ab); + if (ret) + return ret; + + return 0; +} + +static void ath12k_pci_init_qmi_ce_config(struct ath12k_base *ab) +{ + struct ath12k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg; + + cfg->tgt_ce = ab->hw_params->target_ce_config; + cfg->tgt_ce_len = ab->hw_params->target_ce_count; + + cfg->svc_to_ce_map = ab->hw_params->svc_to_ce_map; + cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len; + ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id; +} + +static void ath12k_pci_ce_irqs_enable(struct ath12k_base *ab) +{ + int i; + + for (i = 0; i < ab->hw_params->ce_count; i++) { + if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; + ath12k_pci_ce_irq_enable(ab, i); + } +} + +static void ath12k_pci_msi_config(struct ath12k_pci *ab_pci, bool enable) +{ + struct pci_dev *dev = ab_pci->pdev; + u16 control; + + pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); + + if (enable) + control |= PCI_MSI_FLAGS_ENABLE; + else + control &= ~PCI_MSI_FLAGS_ENABLE; + + pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); +} + +static void ath12k_pci_msi_enable(struct ath12k_pci *ab_pci) +{ + ath12k_pci_msi_config(ab_pci, true); +} + +static void ath12k_pci_msi_disable(struct ath12k_pci *ab_pci) +{ + ath12k_pci_msi_config(ab_pci, false); +} + +static int ath12k_pci_msi_alloc(struct ath12k_pci *ab_pci) +{ + struct ath12k_base *ab = ab_pci->ab; + const struct ath12k_msi_config *msi_config = ab_pci->msi_config; + struct msi_desc *msi_desc; + int num_vectors; + int ret; + + num_vectors = pci_alloc_irq_vectors(ab_pci->pdev, + msi_config->total_vectors, + msi_config->total_vectors, + PCI_IRQ_MSI); + if (num_vectors != msi_config->total_vectors) { + ath12k_err(ab, "failed to get %d MSI vectors, only %d available", + msi_config->total_vectors, num_vectors); + + if (num_vectors >= 0) + return -EINVAL; + else + return num_vectors; + } + + ath12k_pci_msi_disable(ab_pci); + + msi_desc = irq_get_msi_desc(ab_pci->pdev->irq); + if (!msi_desc) { + ath12k_err(ab, "msi_desc is NULL!\n"); + ret = -EINVAL; + goto free_msi_vector; + } + + ab_pci->msi_ep_base_data = msi_desc->msg.data; + if (msi_desc->pci.msi_attrib.is_64) + set_bit(ATH12K_PCI_FLAG_IS_MSI_64, &ab_pci->flags); + + ath12k_dbg(ab, ATH12K_DBG_PCI, "msi base data is %d\n", ab_pci->msi_ep_base_data); + + return 0; + +free_msi_vector: + pci_free_irq_vectors(ab_pci->pdev); + + return ret; +} + +static void ath12k_pci_msi_free(struct ath12k_pci *ab_pci) +{ + pci_free_irq_vectors(ab_pci->pdev); +} + +static int ath12k_pci_claim(struct ath12k_pci *ab_pci, struct pci_dev *pdev) +{ + struct ath12k_base *ab = ab_pci->ab; + u16 device_id; + int ret = 0; + + pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id); + if (device_id != ab_pci->dev_id) { + ath12k_err(ab, "pci device id mismatch: 0x%x 0x%x\n", + device_id, ab_pci->dev_id); + ret = -EIO; + goto out; + } + + ret = pci_assign_resource(pdev, ATH12K_PCI_BAR_NUM); + if (ret) { + ath12k_err(ab, "failed to assign pci resource: %d\n", ret); + goto out; + } 
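A note on the MSI bookkeeping above: the driver carves one block of MSI vectors into named per-user ranges ("CE", "DP", ...), and ath12k_pci_ext_irq_config() maps each consumer onto its user's range with (i % num_vectors) + base_vector. A minimal standalone sketch of that mapping follows; the vector layout here is hypothetical, not the driver's real ath12k_msi_config table.

/* Standalone sketch of the "user name -> vector range" lookup; the table
 * below is assumed for illustration only.
 */
#include <stdio.h>
#include <string.h>

struct msi_user { const char *name; int num_vectors; int base_vector; };

static const struct msi_user users[] = {
	{ "MHI", 3, 0 },	/* hypothetical split of a 16-vector block */
	{ "CE",  5, 3 },
	{ "DP",  8, 8 },
};

/* Mirrors (i % num_vectors) + base_vector in ath12k_pci_ext_irq_config():
 * consumer i of a user block wraps around inside that user's range.
 */
static int user_vector(const char *name, int i)
{
	int k;

	for (k = 0; k < (int)(sizeof(users) / sizeof(users[0])); k++)
		if (strcmp(users[k].name, name) == 0)
			return (i % users[k].num_vectors) + users[k].base_vector;
	return -1;
}

int main(void)
{
	int grp;

	for (grp = 0; grp < 11; grp++)
		printf("DP group %d -> MSI vector %d\n", grp, user_vector("DP", grp));
	return 0;
}

With the assumed 8-vector "DP" range starting at vector 8, groups 8..10 wrap back onto vectors 8..10, which is why several DP groups can legitimately share one MSI vector and IRQF_SHARED is used when requesting them.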
+ + ret = pci_enable_device(pdev); + if (ret) { + ath12k_err(ab, "failed to enable pci device: %d\n", ret); + goto out; + } + + ret = pci_request_region(pdev, ATH12K_PCI_BAR_NUM, "ath12k_pci"); + if (ret) { + ath12k_err(ab, "failed to request pci region: %d\n", ret); + goto disable_device; + } + + ret = dma_set_mask_and_coherent(&pdev->dev, + DMA_BIT_MASK(ATH12K_PCI_DMA_MASK)); + if (ret) { + ath12k_err(ab, "failed to set pci dma mask to %d: %d\n", + ATH12K_PCI_DMA_MASK, ret); + goto release_region; + } + + pci_set_master(pdev); + + ab->mem_len = pci_resource_len(pdev, ATH12K_PCI_BAR_NUM); + ab->mem = pci_iomap(pdev, ATH12K_PCI_BAR_NUM, 0); + if (!ab->mem) { + ath12k_err(ab, "failed to map pci bar %d\n", ATH12K_PCI_BAR_NUM); + ret = -EIO; + goto clear_master; + } + + ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot pci_mem 0x%pK\n", ab->mem); + return 0; + +clear_master: + pci_clear_master(pdev); +release_region: + pci_release_region(pdev, ATH12K_PCI_BAR_NUM); +disable_device: + pci_disable_device(pdev); +out: + return ret; +} + +static void ath12k_pci_free_region(struct ath12k_pci *ab_pci) +{ + struct ath12k_base *ab = ab_pci->ab; + struct pci_dev *pci_dev = ab_pci->pdev; + + pci_iounmap(pci_dev, ab->mem); + ab->mem = NULL; + pci_clear_master(pci_dev); + pci_release_region(pci_dev, ATH12K_PCI_BAR_NUM); + if (pci_is_enabled(pci_dev)) + pci_disable_device(pci_dev); +} + +static void ath12k_pci_aspm_disable(struct ath12k_pci *ab_pci) +{ + struct ath12k_base *ab = ab_pci->ab; + + pcie_capability_read_word(ab_pci->pdev, PCI_EXP_LNKCTL, + &ab_pci->link_ctl); + + ath12k_dbg(ab, ATH12K_DBG_PCI, "pci link_ctl 0x%04x L0s %d L1 %d\n", + ab_pci->link_ctl, + u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L0S), + u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1)); + + /* disable L0s and L1 */ + pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL, + ab_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC); + + set_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags); +} + +static void ath12k_pci_aspm_restore(struct ath12k_pci *ab_pci) +{ + if (test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags)) + pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL, + ab_pci->link_ctl); +} + +static void ath12k_pci_kill_tasklets(struct ath12k_base *ab) +{ + int i; + + for (i = 0; i < ab->hw_params->ce_count; i++) { + struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i]; + + if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; + + tasklet_kill(&ce_pipe->intr_tq); + } +} + +static void ath12k_pci_ce_irq_disable_sync(struct ath12k_base *ab) +{ + ath12k_pci_ce_irqs_disable(ab); + ath12k_pci_sync_ce_irqs(ab); + ath12k_pci_kill_tasklets(ab); +} + +int ath12k_pci_map_service_to_pipe(struct ath12k_base *ab, u16 service_id, + u8 *ul_pipe, u8 *dl_pipe) +{ + const struct service_to_pipe *entry; + bool ul_set = false, dl_set = false; + int i; + + for (i = 0; i < ab->hw_params->svc_to_ce_map_len; i++) { + entry = &ab->hw_params->svc_to_ce_map[i]; + + if (__le32_to_cpu(entry->service_id) != service_id) + continue; + + switch (__le32_to_cpu(entry->pipedir)) { + case PIPEDIR_NONE: + break; + case PIPEDIR_IN: + WARN_ON(dl_set); + *dl_pipe = __le32_to_cpu(entry->pipenum); + dl_set = true; + break; + case PIPEDIR_OUT: + WARN_ON(ul_set); + *ul_pipe = __le32_to_cpu(entry->pipenum); + ul_set = true; + break; + case PIPEDIR_INOUT: + WARN_ON(dl_set); + WARN_ON(ul_set); + *dl_pipe = __le32_to_cpu(entry->pipenum); + *ul_pipe = __le32_to_cpu(entry->pipenum); + dl_set = true; + ul_set = true; + break; + } + } + + if (WARN_ON(!ul_set || 
!dl_set)) + return -ENOENT; + + return 0; +} + +int ath12k_pci_get_msi_irq(struct device *dev, unsigned int vector) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + + return pci_irq_vector(pci_dev, vector); +} + +int ath12k_pci_get_user_msi_assignment(struct ath12k_base *ab, char *user_name, + int *num_vectors, u32 *user_base_data, + u32 *base_vector) +{ + struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); + const struct ath12k_msi_config *msi_config = ab_pci->msi_config; + int idx; + + for (idx = 0; idx < msi_config->total_users; idx++) { + if (strcmp(user_name, msi_config->users[idx].name) == 0) { + *num_vectors = msi_config->users[idx].num_vectors; + *user_base_data = msi_config->users[idx].base_vector + + ab_pci->msi_ep_base_data; + *base_vector = msi_config->users[idx].base_vector; + + ath12k_dbg(ab, ATH12K_DBG_PCI, "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n", + user_name, *num_vectors, *user_base_data, + *base_vector); + + return 0; + } + } + + ath12k_err(ab, "failed to find MSI assignment for %s\n", user_name); + + return -EINVAL; +} + +void ath12k_pci_get_msi_address(struct ath12k_base *ab, u32 *msi_addr_lo, + u32 *msi_addr_hi) +{ + struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); + struct pci_dev *pci_dev = to_pci_dev(ab->dev); + + pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO, + msi_addr_lo); + + if (test_bit(ATH12K_PCI_FLAG_IS_MSI_64, &ab_pci->flags)) { + pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI, + msi_addr_hi); + } else { + *msi_addr_hi = 0; + } +} + +void ath12k_pci_get_ce_msi_idx(struct ath12k_base *ab, u32 ce_id, + u32 *msi_idx) +{ + u32 i, msi_data_idx; + + for (i = 0, msi_data_idx = 0; i < ab->hw_params->ce_count; i++) { + if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; + + if (ce_id == i) + break; + + msi_data_idx++; + } + *msi_idx = msi_data_idx; +} + +void ath12k_pci_hif_ce_irq_enable(struct ath12k_base *ab) +{ + ath12k_pci_ce_irqs_enable(ab); +} + +void ath12k_pci_hif_ce_irq_disable(struct ath12k_base *ab) +{ + ath12k_pci_ce_irq_disable_sync(ab); +} + +void ath12k_pci_ext_irq_enable(struct ath12k_base *ab) +{ + int i; + + for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { + struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; + + napi_enable(&irq_grp->napi); + ath12k_pci_ext_grp_enable(irq_grp); + } +} + +void ath12k_pci_ext_irq_disable(struct ath12k_base *ab) +{ + __ath12k_pci_ext_irq_disable(ab); + ath12k_pci_sync_ext_irqs(ab); +} + +int ath12k_pci_hif_suspend(struct ath12k_base *ab) +{ + struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); + + ath12k_mhi_suspend(ab_pci); + + return 0; +} + +int ath12k_pci_hif_resume(struct ath12k_base *ab) +{ + struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); + + ath12k_mhi_resume(ab_pci); + + return 0; +} + +void ath12k_pci_stop(struct ath12k_base *ab) +{ + ath12k_pci_ce_irq_disable_sync(ab); + ath12k_ce_cleanup_pipes(ab); +} + +int ath12k_pci_start(struct ath12k_base *ab) +{ + struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); + + set_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags); + + ath12k_pci_aspm_restore(ab_pci); + + ath12k_pci_ce_irqs_enable(ab); + ath12k_ce_rx_post_buf(ab); + + return 0; +} + +u32 ath12k_pci_read32(struct ath12k_base *ab, u32 offset) +{ + struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); + u32 val, window_start; + + /* Offsets beyond BAR + 4K - 32 may require MHI to be + * woken up before they can be accessed. + */ + if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && + offset >= ACCESS_ALWAYS_OFF) + mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev); + + if (offset < WINDOW_START) { + val = ioread32(ab->mem + offset); + } else { + if (ab->static_window_map) + window_start = ath12k_pci_get_window_start(ab, offset); + else + window_start = WINDOW_START; + + if (window_start == WINDOW_START) { + spin_lock_bh(&ab_pci->window_lock); + ath12k_pci_select_window(ab_pci, offset); + val = ioread32(ab->mem + window_start + + (offset & WINDOW_RANGE_MASK)); + spin_unlock_bh(&ab_pci->window_lock); + } else { + if ((!window_start) && + (offset >= PCI_MHIREGLEN_REG && + offset <= PCI_MHI_REGION_END)) + offset = offset - PCI_MHIREGLEN_REG; + + val = ioread32(ab->mem + window_start + + (offset & WINDOW_RANGE_MASK)); + } + } + + if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && + offset >= ACCESS_ALWAYS_OFF) + mhi_device_put(ab_pci->mhi_ctrl->mhi_dev); + + return val; +} + +void ath12k_pci_write32(struct ath12k_base *ab, u32 offset, u32 value) +{ + struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); + u32 window_start; + + /* Offsets beyond BAR + 4K - 32 may require MHI to be + * woken up before they can be accessed. + */ + if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && + offset >= ACCESS_ALWAYS_OFF) + mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev); + + if (offset < WINDOW_START) { + iowrite32(value, ab->mem + offset); + } else { + if (ab->static_window_map) + window_start = ath12k_pci_get_window_start(ab, offset); + else + window_start = WINDOW_START; + + if (window_start == WINDOW_START) { + spin_lock_bh(&ab_pci->window_lock); + ath12k_pci_select_window(ab_pci, offset); + iowrite32(value, ab->mem + window_start + + (offset & WINDOW_RANGE_MASK)); + spin_unlock_bh(&ab_pci->window_lock); + } else { + if ((!window_start) && + (offset >= PCI_MHIREGLEN_REG && + offset <= PCI_MHI_REGION_END)) + offset = offset - PCI_MHIREGLEN_REG; + + iowrite32(value, ab->mem + window_start + + (offset & WINDOW_RANGE_MASK)); + } + } + + if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && + offset >= ACCESS_ALWAYS_OFF) + mhi_device_put(ab_pci->mhi_ctrl->mhi_dev); +} + +int ath12k_pci_power_up(struct ath12k_base *ab) +{ + struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); + int ret; + + ab_pci->register_window = 0; + clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags); + ath12k_pci_sw_reset(ab_pci->ab, true); + + /* Disable ASPM during firmware download due to problems switching + * to AMSS state.
+ */ + ath12k_pci_aspm_disable(ab_pci); + + ath12k_pci_msi_enable(ab_pci); + + ret = ath12k_mhi_start(ab_pci); + if (ret) { + ath12k_err(ab, "failed to start mhi: %d\n", ret); + return ret; + } + + if (ab->static_window_map) + ath12k_pci_select_static_window(ab_pci); + + return 0; +} + +void ath12k_pci_power_down(struct ath12k_base *ab) +{ + struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); + + /* restore aspm in case firmware bootup fails */ + ath12k_pci_aspm_restore(ab_pci); + + ath12k_pci_force_wake(ab_pci->ab); + ath12k_pci_msi_disable(ab_pci); + ath12k_mhi_stop(ab_pci); + clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags); + ath12k_pci_sw_reset(ab_pci->ab, false); +} + +static const struct ath12k_hif_ops ath12k_pci_hif_ops = { + .start = ath12k_pci_start, + .stop = ath12k_pci_stop, + .read32 = ath12k_pci_read32, + .write32 = ath12k_pci_write32, + .power_down = ath12k_pci_power_down, + .power_up = ath12k_pci_power_up, + .suspend = ath12k_pci_hif_suspend, + .resume = ath12k_pci_hif_resume, + .irq_enable = ath12k_pci_ext_irq_enable, + .irq_disable = ath12k_pci_ext_irq_disable, + .get_msi_address = ath12k_pci_get_msi_address, + .get_user_msi_vector = ath12k_pci_get_user_msi_assignment, + .map_service_to_pipe = ath12k_pci_map_service_to_pipe, + .ce_irq_enable = ath12k_pci_hif_ce_irq_enable, + .ce_irq_disable = ath12k_pci_hif_ce_irq_disable, + .get_ce_msi_idx = ath12k_pci_get_ce_msi_idx, +}; + +static +void ath12k_pci_read_hw_version(struct ath12k_base *ab, u32 *major, u32 *minor) +{ + u32 soc_hw_version; + + soc_hw_version = ath12k_pci_read32(ab, TCSR_SOC_HW_VERSION); + *major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK, + soc_hw_version); + *minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK, + soc_hw_version); + + ath12k_dbg(ab, ATH12K_DBG_PCI, + "pci tcsr_soc_hw_version major %d minor %d\n", + *major, *minor); +} + +static int ath12k_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *pci_dev) +{ + struct ath12k_base *ab; + struct ath12k_pci *ab_pci; + u32 soc_hw_version_major, soc_hw_version_minor; + int ret; + + ab = ath12k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH12K_BUS_PCI); + if (!ab) { + dev_err(&pdev->dev, "failed to allocate ath12k base\n"); + return -ENOMEM; + } + + ab->dev = &pdev->dev; + pci_set_drvdata(pdev, ab); + ab_pci = ath12k_pci_priv(ab); + ab_pci->dev_id = pci_dev->device; + ab_pci->ab = ab; + ab_pci->pdev = pdev; + ab->hif.ops = &ath12k_pci_hif_ops; + spin_lock_init(&ab_pci->window_lock); + + ret = ath12k_pci_claim(ab_pci, pdev); + if (ret) { + ath12k_err(ab, "failed to claim device: %d\n", ret); + goto err_free_core; + } + + switch (pci_dev->device) { + case QCN9274_DEVICE_ID: + ab_pci->msi_config = &ath12k_msi_config[0]; + ab->static_window_map = true; + ath12k_pci_read_hw_version(ab, &soc_hw_version_major, + &soc_hw_version_minor); + switch (soc_hw_version_major) { + case ATH12K_PCI_SOC_HW_VERSION_2: + ab->hw_rev = ATH12K_HW_QCN9274_HW20; + break; + case ATH12K_PCI_SOC_HW_VERSION_1: + ab->hw_rev = ATH12K_HW_QCN9274_HW10; + break; + default: + dev_err(&pdev->dev, + "Unknown hardware version found for QCN9274: 0x%x\n", + soc_hw_version_major); + ret = -EOPNOTSUPP; + goto err_pci_free_region; + } + break; + case WCN7850_DEVICE_ID: + ab_pci->msi_config = &ath12k_msi_config[0]; + ab->static_window_map = false; + ab->hw_rev = ATH12K_HW_WCN7850_HW20; + break; + + default: + dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n", + pci_dev->device); + ret = -EOPNOTSUPP; + goto err_pci_free_region; + } + + ret = ath12k_pci_msi_alloc(ab_pci); + if (ret) { +
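The windowed register access implemented by ath12k_pci_read32()/ath12k_pci_write32() above rests on two bit tricks: (offset ^ BASE) < MASK tests whether offset falls in the naturally aligned window containing BASE, and offset & MASK yields the position inside the selected window. A self-contained sketch of that math follows, with stand-in window constants; the real WINDOW_START/WINDOW_RANGE_MASK values are defined elsewhere in the driver.

/* Sketch of the windowed-access math in ath12k_pci_read32()/write32().
 * WINDOW_* values below are stand-ins modeled on an ath11k-style 512 KiB
 * window, not the driver's actual definitions.
 */
#include <stdio.h>
#include <stdint.h>

#define WINDOW_START      0x80000u
#define WINDOW_RANGE_MASK 0x7ffffu

/* A BAR offset below WINDOW_START is accessed directly; anything above is
 * reached through a sliding window: the upper bits select the window, the
 * lower bits (offset & WINDOW_RANGE_MASK) index into it.
 */
static void explain(uint32_t offset)
{
	if (offset < WINDOW_START) {
		printf("0x%08x: direct access\n", offset);
		return;
	}
	printf("0x%08x: window %u, offset-in-window 0x%05x\n",
	       offset, offset >> 19, offset & WINDOW_RANGE_MASK);
}

/* (a ^ base) < mask is the branch-free "same window as base" test used by
 * ath12k_pci_get_window_start(): the XOR clears the shared upper bits, so
 * the result stays small only if a lies inside base's window.
 */
static int in_window(uint32_t a, uint32_t base, uint32_t mask)
{
	return (a ^ base) < mask;
}

int main(void)
{
	printf("in_window(0x1e40000, 0x1e00000) = %d\n",
	       in_window(0x1e40000, 0x1e00000, WINDOW_RANGE_MASK));
	explain(0x00003008);	/* e.g. PCIE_SOC_GLOBAL_RESET */
	explain(0x01f80504);	/* e.g. WLAON_WARM_SW_ENTRY */
	return 0;
}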
ath12k_err(ab, "failed to alloc msi: %d\n", ret); + goto err_pci_free_region; + } + + ret = ath12k_core_pre_init(ab); + if (ret) + goto err_pci_msi_free; + + ret = ath12k_mhi_register(ab_pci); + if (ret) { + ath12k_err(ab, "failed to register mhi: %d\n", ret); + goto err_pci_msi_free; + } + + ret = ath12k_hal_srng_init(ab); + if (ret) + goto err_mhi_unregister; + + ret = ath12k_ce_alloc_pipes(ab); + if (ret) { + ath12k_err(ab, "failed to allocate ce pipes: %d\n", ret); + goto err_hal_srng_deinit; + } + + ath12k_pci_init_qmi_ce_config(ab); + + ret = ath12k_pci_config_irq(ab); + if (ret) { + ath12k_err(ab, "failed to config irq: %d\n", ret); + goto err_ce_free; + } + + ret = ath12k_core_init(ab); + if (ret) { + ath12k_err(ab, "failed to init core: %d\n", ret); + goto err_free_irq; + } + return 0; + +err_free_irq: + ath12k_pci_free_irq(ab); + +err_ce_free: + ath12k_ce_free_pipes(ab); + +err_hal_srng_deinit: + ath12k_hal_srng_deinit(ab); + +err_mhi_unregister: + ath12k_mhi_unregister(ab_pci); + +err_pci_msi_free: + ath12k_pci_msi_free(ab_pci); + +err_pci_free_region: + ath12k_pci_free_region(ab_pci); + +err_free_core: + ath12k_core_free(ab); + + return ret; +} + +static void ath12k_pci_remove(struct pci_dev *pdev) +{ + struct ath12k_base *ab = pci_get_drvdata(pdev); + struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); + + if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) { + ath12k_pci_power_down(ab); + ath12k_qmi_deinit_service(ab); + goto qmi_fail; + } + + set_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags); + + cancel_work_sync(&ab->reset_work); + ath12k_core_deinit(ab); + +qmi_fail: + ath12k_mhi_unregister(ab_pci); + + ath12k_pci_free_irq(ab); + ath12k_pci_msi_free(ab_pci); + ath12k_pci_free_region(ab_pci); + + ath12k_hal_srng_deinit(ab); + ath12k_ce_free_pipes(ab); + ath12k_core_free(ab); +} + +static void ath12k_pci_shutdown(struct pci_dev *pdev) +{ + struct ath12k_base *ab = pci_get_drvdata(pdev); + + ath12k_pci_power_down(ab); +} + +static __maybe_unused int ath12k_pci_pm_suspend(struct device *dev) +{ + struct ath12k_base *ab = dev_get_drvdata(dev); + int ret; + + ret = ath12k_core_suspend(ab); + if (ret) + ath12k_warn(ab, "failed to suspend core: %d\n", ret); + + return ret; +} + +static __maybe_unused int ath12k_pci_pm_resume(struct device *dev) +{ + struct ath12k_base *ab = dev_get_drvdata(dev); + int ret; + + ret = ath12k_core_resume(ab); + if (ret) + ath12k_warn(ab, "failed to resume core: %d\n", ret); + + return ret; +} + +static SIMPLE_DEV_PM_OPS(ath12k_pci_pm_ops, + ath12k_pci_pm_suspend, + ath12k_pci_pm_resume); + +static struct pci_driver ath12k_pci_driver = { + .name = "ath12k_pci", + .id_table = ath12k_pci_id_table, + .probe = ath12k_pci_probe, + .remove = ath12k_pci_remove, + .shutdown = ath12k_pci_shutdown, + .driver.pm = &ath12k_pci_pm_ops, +}; + +static int ath12k_pci_init(void) +{ + int ret; + + ret = pci_register_driver(&ath12k_pci_driver); + if (ret) { + pr_err("failed to register ath12k pci driver: %d\n", + ret); + return ret; + } + + return 0; +} +module_init(ath12k_pci_init); + +static void ath12k_pci_exit(void) +{ + pci_unregister_driver(&ath12k_pci_driver); +} + +module_exit(ath12k_pci_exit); + +MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11be WLAN PCIe devices"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/ath/ath12k/pci.h b/drivers/net/wireless/ath/ath12k/pci.h new file mode 100644 index 000000000000..0d9e40ab31f2 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/pci.h @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: 
BSD-3-Clause-Clear */ +/* + * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ +#ifndef ATH12K_PCI_H +#define ATH12K_PCI_H + +#include <linux/mhi.h> + +#include "core.h" + +#define PCIE_SOC_GLOBAL_RESET 0x3008 +#define PCIE_SOC_GLOBAL_RESET_V 1 + +#define WLAON_WARM_SW_ENTRY 0x1f80504 +#define WLAON_SOC_RESET_CAUSE_REG 0x01f8060c + +#define PCIE_Q6_COOKIE_ADDR 0x01f80500 +#define PCIE_Q6_COOKIE_DATA 0xc0000000 + +/* register to wake the UMAC from power collapse */ +#define PCIE_SCRATCH_0_SOC_PCIE_REG 0x4040 + +/* register used for handshake mechanism to validate UMAC is awake */ +#define PCIE_SOC_WAKE_PCIE_LOCAL_REG 0x3004 + +#define PCIE_PCIE_PARF_LTSSM 0x1e081b0 +#define PARM_LTSSM_VALUE 0x111 + +#define GCC_GCC_PCIE_HOT_RST 0x1e38338 +#define GCC_GCC_PCIE_HOT_RST_VAL 0x10 + +#define PCIE_PCIE_INT_ALL_CLEAR 0x1e08228 +#define PCIE_SMLH_REQ_RST_LINK_DOWN 0x2 +#define PCIE_INT_CLEAR_ALL 0xffffffff + +#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG(ab) \ + ((ab)->hw_params->regs->pcie_qserdes_sysclk_en_sel) +#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL 0x10 +#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK 0xffffffff +#define PCIE_PCS_OSC_DTCT_CONFIG1_REG(ab) \ + ((ab)->hw_params->regs->pcie_pcs_osc_dtct_config_base) +#define PCIE_PCS_OSC_DTCT_CONFIG1_VAL 0x02 +#define PCIE_PCS_OSC_DTCT_CONFIG2_REG(ab) \ + ((ab)->hw_params->regs->pcie_pcs_osc_dtct_config_base + 0x4) +#define PCIE_PCS_OSC_DTCT_CONFIG2_VAL 0x52 +#define PCIE_PCS_OSC_DTCT_CONFIG4_REG(ab) \ + ((ab)->hw_params->regs->pcie_pcs_osc_dtct_config_base + 0xc) +#define PCIE_PCS_OSC_DTCT_CONFIG4_VAL 0xff +#define PCIE_PCS_OSC_DTCT_CONFIG_MSK 0x000000ff + +#define WLAON_QFPROM_PWR_CTRL_REG 0x01f8031c +#define QFPROM_PWR_CTRL_VDD4BLOW_MASK 0x4 + +#define PCI_BAR_WINDOW0_BASE 0x1E00000 +#define PCI_BAR_WINDOW0_END 0x1E7FFFC +#define PCI_SOC_RANGE_MASK 0x3FFF +#define PCI_SOC_PCI_REG_BASE 0x1E04000 +#define PCI_SOC_PCI_REG_END 0x1E07FFC +#define PCI_PARF_BASE 0x1E08000 +#define PCI_PARF_END 0x1E0BFFC +#define PCI_MHIREGLEN_REG 0x1E0E100 +#define PCI_MHI_REGION_END 0x1E0EFFC +#define QRTR_PCI_DOMAIN_NR_MASK GENMASK(7, 4) +#define QRTR_PCI_BUS_NUMBER_MASK GENMASK(3, 0) + +#define ATH12K_PCI_SOC_HW_VERSION_1 1 +#define ATH12K_PCI_SOC_HW_VERSION_2 2 + +struct ath12k_msi_user { + const char *name; + int num_vectors; + u32 base_vector; +}; + +struct ath12k_msi_config { + int total_vectors; + int total_users; + const struct ath12k_msi_user *users; +}; + +enum ath12k_pci_flags { + ATH12K_PCI_FLAG_INIT_DONE, + ATH12K_PCI_FLAG_IS_MSI_64, + ATH12K_PCI_ASPM_RESTORE, +}; + +struct ath12k_pci { + struct pci_dev *pdev; + struct ath12k_base *ab; + u16 dev_id; + char amss_path[100]; + u32 msi_ep_base_data; + struct mhi_controller *mhi_ctrl; + const struct ath12k_msi_config *msi_config; + unsigned long mhi_state; + u32 register_window; + + /* protects register_window above */ + spinlock_t window_lock; + + /* enum ath12k_pci_flags */ + unsigned long flags; + u16 link_ctl; +}; + +static inline struct ath12k_pci *ath12k_pci_priv(struct ath12k_base *ab) +{ + return (struct ath12k_pci *)ab->drv_priv; +} + +int ath12k_pci_get_user_msi_assignment(struct ath12k_base *ab, char *user_name, + int *num_vectors, u32 *user_base_data, + u32 *base_vector); +int ath12k_pci_get_msi_irq(struct device *dev, unsigned int vector); +void ath12k_pci_write32(struct ath12k_base *ab, u32 offset, u32 value); +u32 ath12k_pci_read32(struct ath12k_base *ab, u32 offset); +int 
ath12k_pci_map_service_to_pipe(struct ath12k_base *ab, u16 service_id, + u8 *ul_pipe, u8 *dl_pipe); +void ath12k_pci_get_msi_address(struct ath12k_base *ab, u32 *msi_addr_lo, + u32 *msi_addr_hi); +void ath12k_pci_get_ce_msi_idx(struct ath12k_base *ab, u32 ce_id, + u32 *msi_idx); +void ath12k_pci_hif_ce_irq_enable(struct ath12k_base *ab); +void ath12k_pci_hif_ce_irq_disable(struct ath12k_base *ab); +void ath12k_pci_ext_irq_enable(struct ath12k_base *ab); +void ath12k_pci_ext_irq_disable(struct ath12k_base *ab); +int ath12k_pci_hif_suspend(struct ath12k_base *ab); +int ath12k_pci_hif_resume(struct ath12k_base *ab); +void ath12k_pci_stop(struct ath12k_base *ab); +int ath12k_pci_start(struct ath12k_base *ab); +int ath12k_pci_power_up(struct ath12k_base *ab); +void ath12k_pci_power_down(struct ath12k_base *ab); +#endif /* ATH12K_PCI_H */ diff --git a/drivers/net/wireless/ath/ath12k/peer.c b/drivers/net/wireless/ath/ath12k/peer.c new file mode 100644 index 000000000000..19c0626fbff1 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/peer.c @@ -0,0 +1,342 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include "core.h" +#include "peer.h" +#include "debug.h" + +struct ath12k_peer *ath12k_peer_find(struct ath12k_base *ab, int vdev_id, + const u8 *addr) +{ + struct ath12k_peer *peer; + + lockdep_assert_held(&ab->base_lock); + + list_for_each_entry(peer, &ab->peers, list) { + if (peer->vdev_id != vdev_id) + continue; + if (!ether_addr_equal(peer->addr, addr)) + continue; + + return peer; + } + + return NULL; +} + +static struct ath12k_peer *ath12k_peer_find_by_pdev_idx(struct ath12k_base *ab, + u8 pdev_idx, const u8 *addr) +{ + struct ath12k_peer *peer; + + lockdep_assert_held(&ab->base_lock); + + list_for_each_entry(peer, &ab->peers, list) { + if (peer->pdev_idx != pdev_idx) + continue; + if (!ether_addr_equal(peer->addr, addr)) + continue; + + return peer; + } + + return NULL; +} + +struct ath12k_peer *ath12k_peer_find_by_addr(struct ath12k_base *ab, + const u8 *addr) +{ + struct ath12k_peer *peer; + + lockdep_assert_held(&ab->base_lock); + + list_for_each_entry(peer, &ab->peers, list) { + if (!ether_addr_equal(peer->addr, addr)) + continue; + + return peer; + } + + return NULL; +} + +struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab, + int peer_id) +{ + struct ath12k_peer *peer; + + lockdep_assert_held(&ab->base_lock); + + list_for_each_entry(peer, &ab->peers, list) + if (peer_id == peer->peer_id) + return peer; + + return NULL; +} + +bool ath12k_peer_exist_by_vdev_id(struct ath12k_base *ab, int vdev_id) +{ + struct ath12k_peer *peer; + + spin_lock_bh(&ab->base_lock); + + list_for_each_entry(peer, &ab->peers, list) { + if (vdev_id == peer->vdev_id) { + spin_unlock_bh(&ab->base_lock); + return true; + } + } + spin_unlock_bh(&ab->base_lock); + return false; +} + +struct ath12k_peer *ath12k_peer_find_by_ast(struct ath12k_base *ab, + int ast_hash) +{ + struct ath12k_peer *peer; + + lockdep_assert_held(&ab->base_lock); + + list_for_each_entry(peer, &ab->peers, list) + if (ast_hash == peer->ast_hash) + return peer; + + return NULL; +} + +void ath12k_peer_unmap_event(struct ath12k_base *ab, u16 peer_id) +{ + struct ath12k_peer *peer; + + spin_lock_bh(&ab->base_lock); + + peer = ath12k_peer_find_by_id(ab, peer_id); + if (!peer) { + ath12k_warn(ab, "peer-unmap-event: unknown peer id %d\n", + peer_id); + goto exit; + } + + 
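For context on the peer bookkeeping in this file: ath12k keeps all peers on one flat list keyed by (vdev_id, MAC address), and every lookup, including the ones in the map/unmap event handlers here, walks that list under ab->base_lock. A toy standalone model of that discipline follows; the names are illustrative and a pthread mutex stands in for the kernel spinlock and lockdep annotations.

/* Toy model of the ath12k peer list: linear lookup keyed by (vdev_id, MAC),
 * every walk done under one lock. Illustrative only; the driver uses the
 * kernel's list_head, spinlocks and lockdep_assert_held() instead.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct peer {
	struct peer *next;
	int vdev_id;
	unsigned char addr[ETH_ALEN];
};

static struct peer *peers;	/* list head, protected by base_lock */
static pthread_mutex_t base_lock = PTHREAD_MUTEX_INITIALIZER;

/* caller must hold base_lock, mirroring lockdep_assert_held(&ab->base_lock) */
static struct peer *peer_find(int vdev_id, const unsigned char *addr)
{
	struct peer *p;

	for (p = peers; p; p = p->next)
		if (p->vdev_id == vdev_id && !memcmp(p->addr, addr, ETH_ALEN))
			return p;
	return NULL;
}

int main(void)
{
	unsigned char mac[ETH_ALEN] = { 0x00, 0x03, 0x7f, 0x12, 0x34, 0x56 };
	struct peer p = { .next = NULL, .vdev_id = 0 };
	bool found;

	memcpy(p.addr, mac, ETH_ALEN);
	peers = &p;

	pthread_mutex_lock(&base_lock);
	found = peer_find(0, mac) != NULL;
	pthread_mutex_unlock(&base_lock);

	printf("peer found: %d\n", found);
	return 0;
}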
ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "htt peer unmap vdev %d peer %pM id %d\n", + peer->vdev_id, peer->addr, peer_id); + + list_del(&peer->list); + kfree(peer); + wake_up(&ab->peer_mapping_wq); + +exit: + spin_unlock_bh(&ab->base_lock); +} + +void ath12k_peer_map_event(struct ath12k_base *ab, u8 vdev_id, u16 peer_id, + u8 *mac_addr, u16 ast_hash, u16 hw_peer_id) +{ + struct ath12k_peer *peer; + + spin_lock_bh(&ab->base_lock); + peer = ath12k_peer_find(ab, vdev_id, mac_addr); + if (!peer) { + peer = kzalloc(sizeof(*peer), GFP_ATOMIC); + if (!peer) + goto exit; + + peer->vdev_id = vdev_id; + peer->peer_id = peer_id; + peer->ast_hash = ast_hash; + peer->hw_peer_id = hw_peer_id; + ether_addr_copy(peer->addr, mac_addr); + list_add(&peer->list, &ab->peers); + wake_up(&ab->peer_mapping_wq); + } + + ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "htt peer map vdev %d peer %pM id %d\n", + vdev_id, mac_addr, peer_id); + +exit: + spin_unlock_bh(&ab->base_lock); +} + +static int ath12k_wait_for_peer_common(struct ath12k_base *ab, int vdev_id, + const u8 *addr, bool expect_mapped) +{ + int ret; + + ret = wait_event_timeout(ab->peer_mapping_wq, ({ + bool mapped; + + spin_lock_bh(&ab->base_lock); + mapped = !!ath12k_peer_find(ab, vdev_id, addr); + spin_unlock_bh(&ab->base_lock); + + (mapped == expect_mapped || + test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags)); + }), 3 * HZ); + + if (ret <= 0) + return -ETIMEDOUT; + + return 0; +} + +void ath12k_peer_cleanup(struct ath12k *ar, u32 vdev_id) +{ + struct ath12k_peer *peer, *tmp; + struct ath12k_base *ab = ar->ab; + + lockdep_assert_held(&ar->conf_mutex); + + spin_lock_bh(&ab->base_lock); + list_for_each_entry_safe(peer, tmp, &ab->peers, list) { + if (peer->vdev_id != vdev_id) + continue; + + ath12k_warn(ab, "removing stale peer %pM from vdev_id %d\n", + peer->addr, vdev_id); + + list_del(&peer->list); + kfree(peer); + ar->num_peers--; + } + + spin_unlock_bh(&ab->base_lock); +} + +static int ath12k_wait_for_peer_deleted(struct ath12k *ar, int vdev_id, const u8 *addr) +{ + return ath12k_wait_for_peer_common(ar->ab, vdev_id, addr, false); +} + +int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id, + const u8 *addr) +{ + int ret; + unsigned long time_left; + + ret = ath12k_wait_for_peer_deleted(ar, vdev_id, addr); + if (ret) { + ath12k_warn(ar->ab, "failed to wait for peer to be deleted\n"); + return ret; + } + + time_left = wait_for_completion_timeout(&ar->peer_delete_done, + 3 * HZ); + if (time_left == 0) { + ath12k_warn(ar->ab, "timeout in receiving peer delete response\n"); + return -ETIMEDOUT; + } + + return 0; +} + +int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr) +{ + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + reinit_completion(&ar->peer_delete_done); + + ret = ath12k_wmi_send_peer_delete_cmd(ar, addr, vdev_id); + if (ret) { + ath12k_warn(ar->ab, + "failed to delete peer vdev_id %d addr %pM ret %d\n", + vdev_id, addr, ret); + return ret; + } + + ret = ath12k_wait_for_peer_delete_done(ar, vdev_id, addr); + if (ret) + return ret; + + ar->num_peers--; + + return 0; +} + +static int ath12k_wait_for_peer_created(struct ath12k *ar, int vdev_id, const u8 *addr) +{ + return ath12k_wait_for_peer_common(ar->ab, vdev_id, addr, true); +} + +int ath12k_peer_create(struct ath12k *ar, struct ath12k_vif *arvif, + struct ieee80211_sta *sta, + struct ath12k_wmi_peer_create_arg *arg) +{ + struct ath12k_peer *peer; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + if (ar->num_peers > (ar->max_num_peers - 1)) { + ath12k_warn(ar->ab, + "failed to 
create peer due to insufficient peer entry resource in firmware\n"); + return -ENOBUFS; + } + + spin_lock_bh(&ar->ab->base_lock); + peer = ath12k_peer_find_by_pdev_idx(ar->ab, ar->pdev_idx, arg->peer_addr); + if (peer) { + spin_unlock_bh(&ar->ab->base_lock); + return -EINVAL; + } + spin_unlock_bh(&ar->ab->base_lock); + + ret = ath12k_wmi_send_peer_create_cmd(ar, arg); + if (ret) { + ath12k_warn(ar->ab, + "failed to send peer create vdev_id %d ret %d\n", + arg->vdev_id, ret); + return ret; + } + + ret = ath12k_wait_for_peer_created(ar, arg->vdev_id, + arg->peer_addr); + if (ret) + return ret; + + spin_lock_bh(&ar->ab->base_lock); + + peer = ath12k_peer_find(ar->ab, arg->vdev_id, arg->peer_addr); + if (!peer) { + spin_unlock_bh(&ar->ab->base_lock); + ath12k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n", + arg->peer_addr, arg->vdev_id); + + reinit_completion(&ar->peer_delete_done); + + ret = ath12k_wmi_send_peer_delete_cmd(ar, arg->peer_addr, + arg->vdev_id); + if (ret) { + ath12k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n", + arg->vdev_id, arg->peer_addr); + return ret; + } + + ret = ath12k_wait_for_peer_delete_done(ar, arg->vdev_id, + arg->peer_addr); + if (ret) + return ret; + + return -ENOENT; + } + + peer->pdev_idx = ar->pdev_idx; + peer->sta = sta; + + if (arvif->vif->type == NL80211_IFTYPE_STATION) { + arvif->ast_hash = peer->ast_hash; + arvif->ast_idx = peer->hw_peer_id; + } + + peer->sec_type = HAL_ENCRYPT_TYPE_OPEN; + peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN; + + ar->num_peers++; + + spin_unlock_bh(&ar->ab->base_lock); + + return 0; +} diff --git a/drivers/net/wireless/ath/ath12k/peer.h b/drivers/net/wireless/ath/ath12k/peer.h new file mode 100644 index 000000000000..b296dc0e2f67 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/peer.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef ATH12K_PEER_H +#define ATH12K_PEER_H + +#include "dp_rx.h" + +struct ppdu_user_delayba { + u16 sw_peer_id; + u32 info0; + u16 ru_end; + u16 ru_start; + u32 info1; + u32 rate_flags; + u32 resp_rate_flags; +}; + +struct ath12k_peer { + struct list_head list; + struct ieee80211_sta *sta; + int vdev_id; + u8 addr[ETH_ALEN]; + int peer_id; + u16 ast_hash; + u8 pdev_idx; + u16 hw_peer_id; + + /* protected by ab->data_lock */ + struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1]; + struct ath12k_dp_rx_tid rx_tid[IEEE80211_NUM_TIDS + 1]; + + /* Info used in MMIC verification of + * RX fragments + */ + struct crypto_shash *tfm_mmic; + u8 mcast_keyidx; + u8 ucast_keyidx; + u16 sec_type; + u16 sec_type_grp; + struct ppdu_user_delayba ppdu_stats_delayba; + bool delayba_flag; + bool is_authorized; +}; + +void ath12k_peer_unmap_event(struct ath12k_base *ab, u16 peer_id); +void ath12k_peer_map_event(struct ath12k_base *ab, u8 vdev_id, u16 peer_id, + u8 *mac_addr, u16 ast_hash, u16 hw_peer_id); +struct ath12k_peer *ath12k_peer_find(struct ath12k_base *ab, int vdev_id, + const u8 *addr); +struct ath12k_peer *ath12k_peer_find_by_addr(struct ath12k_base *ab, + const u8 *addr); +struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab, int peer_id); +void ath12k_peer_cleanup(struct ath12k *ar, u32 vdev_id); +int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr); +int ath12k_peer_create(struct ath12k *ar, struct ath12k_vif *arvif, + struct ieee80211_sta *sta, + struct ath12k_wmi_peer_create_arg *arg); +int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id, + const u8 *addr); +bool ath12k_peer_exist_by_vdev_id(struct ath12k_base *ab, int vdev_id); +struct ath12k_peer *ath12k_peer_find_by_ast(struct ath12k_base *ab, int ast_hash); + +#endif /* ATH12K_PEER_H */ diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c new file mode 100644 index 000000000000..979a63f2e2ab --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/qmi.c @@ -0,0 +1,3087 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include <linux/elf.h> + +#include "qmi.h" +#include "core.h" +#include "debug.h" +#include <linux/of.h> +#include <linux/firmware.h> + +#define SLEEP_CLOCK_SELECT_INTERNAL_BIT 0x02 +#define HOST_CSTATE_BIT 0x04 +#define PLATFORM_CAP_PCIE_GLOBAL_RESET 0x08 +#define ATH12K_QMI_MAX_CHUNK_SIZE 2097152 + +static struct qmi_elem_info wlfw_host_mlo_chip_info_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_host_mlo_chip_info_s_v01, + chip_id), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_host_mlo_chip_info_s_v01, + num_local_links), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = QMI_WLFW_MAX_NUM_MLO_LINKS_PER_CHIP_V01, + .elem_size = sizeof(u8), + .array_type = STATIC_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_host_mlo_chip_info_s_v01, + hw_link_id), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = QMI_WLFW_MAX_NUM_MLO_LINKS_PER_CHIP_V01, + .elem_size = sizeof(u8), + .array_type = STATIC_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_host_mlo_chip_info_s_v01, + valid_mlo_link_id), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + num_clients_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + num_clients), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + wake_msi_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + wake_msi), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + gpios_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + gpios_len), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = QMI_WLFW_MAX_NUM_GPIO_V01, + .elem_size = sizeof(u32), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + gpios), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + nm_modem_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + nm_modem), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + 
bdf_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + bdf_support), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + bdf_cache_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + bdf_cache_support), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + m3_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + m3_support), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + m3_cache_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + m3_cache_support), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + cal_filesys_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + cal_filesys_support), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + cal_cache_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + cal_cache_support), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + cal_done_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + cal_done), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + mem_bucket_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + mem_bucket), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1C, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + mem_cfg_mode_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = 
NO_ARRAY, + .tlv_type = 0x1C, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + mem_cfg_mode), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1D, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + cal_duration_valid), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x1D, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + cal_duraiton), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1E, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + platform_name_valid), + }, + { + .data_type = QMI_STRING, + .elem_len = QMI_WLANFW_MAX_PLATFORM_NAME_LEN_V01 + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0x1E, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + platform_name), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1F, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + ddr_range_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_WLANFW_MAX_HOST_DDR_RANGE_SIZE_V01, + .elem_size = sizeof(struct qmi_wlanfw_host_ddr_range), + .array_type = STATIC_ARRAY, + .tlv_type = 0x1F, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + ddr_range), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x20, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + host_build_type_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum qmi_wlanfw_host_build_type), + .array_type = NO_ARRAY, + .tlv_type = 0x20, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + host_build_type), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x21, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + mlo_capable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x21, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + mlo_capable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x22, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + mlo_chip_id_valid), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x22, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + mlo_chip_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x23, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + mlo_group_id_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x23, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + mlo_group_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x24, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + max_mlo_peer_valid), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x24, + 
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + max_mlo_peer), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x25, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + mlo_num_chips_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x25, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + mlo_num_chips), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x26, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + mlo_chip_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_WLFW_MAX_NUM_MLO_CHIPS_V01, + .elem_size = sizeof(struct wlfw_host_mlo_chip_info_s_v01), + .array_type = STATIC_ARRAY, + .tlv_type = 0x26, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + mlo_chip_info), + .ei_array = wlfw_host_mlo_chip_info_s_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x27, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + feature_list_valid), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0x27, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + feature_list), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct qmi_wlanfw_host_cap_resp_msg_v01, resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + fw_ready_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + fw_ready_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + initiate_cal_download_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + initiate_cal_download_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + initiate_cal_update_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + initiate_cal_update_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type 
= 0x13, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + msa_ready_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + msa_ready_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + pin_connect_result_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + pin_connect_result_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + client_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + client_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + request_mem_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + request_mem_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + fw_mem_ready_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + fw_mem_ready_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + fw_init_done_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + fw_init_done_enable), + }, + + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + rejuvenate_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + rejuvenate_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + xo_cal_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + xo_cal_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1B, 
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + cal_done_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + cal_done_enable), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01, + fw_status_valid), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01, + fw_status), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, offset), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, size), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, secure_flag), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, + size), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum qmi_wlanfw_mem_type_enum_v01), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, type), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_WLANFW_MAX_NUM_MEM_CFG_V01, + .elem_size = sizeof(struct qmi_wlanfw_mem_cfg_s_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg), + .ei_array = qmi_wlanfw_mem_cfg_s_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = { + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01, + mem_seg_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01, + .elem_size = sizeof(struct qmi_wlanfw_mem_seg_s_v01), + .array_type = 
VAR_LEN_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01, + mem_seg), + .ei_array = qmi_wlanfw_mem_seg_s_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, addr), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, size), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum qmi_wlanfw_mem_type_enum_v01), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, type), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, restore), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = { + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01, + mem_seg_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01, + .elem_size = sizeof(struct qmi_wlanfw_mem_seg_resp_s_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01, + mem_seg), + .ei_array = qmi_wlanfw_mem_seg_resp_s_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct qmi_wlanfw_respond_mem_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = { + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_rf_chip_info_s_v01, + chip_id), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_rf_chip_info_s_v01, + chip_family), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_rf_board_info_s_v01, + board_id), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct 
qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_soc_info_s_v01, soc_id), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_dev_mem_info_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_dev_mem_info_s_v01, + start), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_dev_mem_info_s_v01, + size), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_fw_version_info_s_v01, + fw_version), + }, + { + .data_type = QMI_STRING, + .elem_len = ATH12K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_fw_version_info_s_v01, + fw_build_timestamp), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + chip_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_wlanfw_rf_chip_info_s_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + chip_info), + .ei_array = qmi_wlanfw_rf_chip_info_s_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + board_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_wlanfw_rf_board_info_s_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + board_info), + .ei_array = qmi_wlanfw_rf_board_info_s_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + soc_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_wlanfw_soc_info_s_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + soc_info), + .ei_array = qmi_wlanfw_soc_info_s_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + fw_version_info_valid), + }, + { + .data_type = 
QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_wlanfw_fw_version_info_s_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + fw_version_info), + .ei_array = qmi_wlanfw_fw_version_info_s_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + fw_build_id_valid), + }, + { + .data_type = QMI_STRING, + .elem_len = ATH12K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + fw_build_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + num_macs_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + num_macs), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + voltage_mv_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + voltage_mv), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + time_freq_hz_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + time_freq_hz), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + otp_version_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + otp_version), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + eeprom_caldata_read_timeout_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + eeprom_caldata_read_timeout), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + fw_caps_valid), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, fw_caps), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + rd_card_chain_cap_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = 
NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + rd_card_chain_cap), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1C, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + dev_mem_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = ATH12K_QMI_WLFW_MAX_DEV_MEM_NUM_V01, + .elem_size = sizeof(struct qmi_wlanfw_dev_mem_info_s_v01), + .array_type = STATIC_ARRAY, + .tlv_type = 0x1C, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, dev_mem), + .ei_array = qmi_wlanfw_dev_mem_info_s_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, + valid), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, + file_id_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum qmi_wlanfw_cal_temp_id_enum_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, + file_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, + total_size_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, + total_size), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, + seg_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, + seg_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, + data_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, + data_len), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = QMI_WLANFW_MAX_DATA_SIZE_V01, + .elem_size = sizeof(u8), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, + data), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, + end_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, + end), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct 
qmi_wlanfw_bdf_download_req_msg_v01, + bdf_type_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, + bdf_type), + }, + + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct qmi_wlanfw_bdf_download_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, addr), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, size), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct qmi_wlanfw_m3_info_resp_msg_v01, resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01, + pipe_num), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum qmi_wlanfw_pipedir_enum_v01), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01, + pipe_dir), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01, + nentries), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01, + nbytes_max), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01, + flags), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01, + service_id), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum qmi_wlanfw_pipedir_enum_v01), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct 
qmi_wlanfw_ce_svc_pipe_cfg_s_v01, + pipe_dir), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01, + pipe_num), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01, id), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01, + offset), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_shadow_reg_v3_cfg_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_shadow_reg_v3_cfg_s_v01, + addr), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01, + mode), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01, + hw_debug_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01, + hw_debug), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct qmi_wlanfw_wlan_mode_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, + host_version_valid), + }, + { + .data_type = QMI_STRING, + .elem_len = QMI_WLANFW_MAX_STR_LEN_V01 + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, + host_version), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, + tgt_cfg_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, + tgt_cfg_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_WLANFW_MAX_NUM_CE_V01, + .elem_size = 
sizeof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, + tgt_cfg), + .ei_array = qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, + svc_cfg_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, + svc_cfg_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_WLANFW_MAX_NUM_SVC_V01, + .elem_size = sizeof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, + svc_cfg), + .ei_array = qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, + shadow_reg_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, + shadow_reg_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_WLANFW_MAX_NUM_SHADOW_REG_V01, + .elem_size = sizeof(struct qmi_wlanfw_shadow_reg_cfg_s_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, + shadow_reg), + .ei_array = qmi_wlanfw_shadow_reg_cfg_s_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, + shadow_reg_v3_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, + shadow_reg_v3_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_WLANFW_MAX_NUM_SHADOW_REG_V3_V01, + .elem_size = sizeof(struct qmi_wlanfw_shadow_reg_v3_cfg_s_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x17, + .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, + shadow_reg_v3), + .ei_array = qmi_wlanfw_shadow_reg_v3_cfg_s_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct qmi_wlanfw_wlan_cfg_resp_msg_v01, resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_mem_ready_ind_msg_v01_ei[] = { + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_fw_ready_ind_msg_v01_ei[] = { + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + }, +}; + +static void ath12k_host_cap_parse_mlo(struct qmi_wlanfw_host_cap_req_msg_v01 *req) +{ + req->mlo_capable_valid = 1; + req->mlo_capable = 1; + req->mlo_chip_id_valid = 1; + req->mlo_chip_id = 0; + req->mlo_group_id_valid = 1; + req->mlo_group_id = 0; + req->max_mlo_peer_valid = 1; + 
/* Max peer number generally won't change for the same device + * but needs to be synced with host driver. + */ + req->max_mlo_peer = 32; + req->mlo_num_chips_valid = 1; + req->mlo_num_chips = 1; + req->mlo_chip_info_valid = 1; + req->mlo_chip_info[0].chip_id = 0; + req->mlo_chip_info[0].num_local_links = 2; + req->mlo_chip_info[0].hw_link_id[0] = 0; + req->mlo_chip_info[0].hw_link_id[1] = 1; + req->mlo_chip_info[0].valid_mlo_link_id[0] = 1; + req->mlo_chip_info[0].valid_mlo_link_id[1] = 1; +} + +static int ath12k_qmi_host_cap_send(struct ath12k_base *ab) +{ + struct qmi_wlanfw_host_cap_req_msg_v01 req; + struct qmi_wlanfw_host_cap_resp_msg_v01 resp; + struct qmi_txn txn = {}; + int ret = 0; + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + + req.num_clients_valid = 1; + req.num_clients = 1; + req.mem_cfg_mode = ab->qmi.target_mem_mode; + req.mem_cfg_mode_valid = 1; + req.bdf_support_valid = 1; + req.bdf_support = 1; + + req.m3_support_valid = 1; + req.m3_support = 1; + req.m3_cache_support_valid = 1; + req.m3_cache_support = 1; + + req.cal_done_valid = 1; + req.cal_done = ab->qmi.cal_done; + + req.feature_list_valid = 1; + req.feature_list = BIT(CNSS_QDSS_CFG_MISS_V01); + + /* BRINGUP: here we are piggybacking a lot of stuff using + * internal_sleep_clock, should it be split? + */ + if (ab->hw_params->internal_sleep_clock) { + req.nm_modem_valid = 1; + + /* Notify firmware that this is a non-Qualcomm platform. */ + req.nm_modem |= HOST_CSTATE_BIT; + + /* Notify firmware about the sleep clock selection, + * nm_modem_bit[1] is used for this purpose. Host driver on + * non-Qualcomm platforms should select internal sleep + * clock. + */ + req.nm_modem |= SLEEP_CLOCK_SELECT_INTERNAL_BIT; + req.nm_modem |= PLATFORM_CAP_PCIE_GLOBAL_RESET; + + ath12k_host_cap_parse_mlo(&req); + } + + ret = qmi_txn_init(&ab->qmi.handle, &txn, + qmi_wlanfw_host_cap_resp_msg_v01_ei, &resp); + if (ret < 0) + goto out; + + ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, + QMI_WLANFW_HOST_CAP_REQ_V01, + QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN, + qmi_wlanfw_host_cap_req_msg_v01_ei, &req); + if (ret < 0) { + ath12k_warn(ab, "Failed to send host capability request, err = %d\n", ret); + goto out; + } + + ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS)); + if (ret < 0) + goto out; + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + ath12k_warn(ab, "Host capability request failed, result: %d, err: %d\n", + resp.resp.result, resp.resp.error); + ret = -EINVAL; + goto out; + } + +out: + return ret; +} + +static int ath12k_qmi_fw_ind_register_send(struct ath12k_base *ab) +{ + struct qmi_wlanfw_ind_register_req_msg_v01 *req; + struct qmi_wlanfw_ind_register_resp_msg_v01 *resp; + struct qmi_handle *handle = &ab->qmi.handle; + struct qmi_txn txn; + int ret; + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return -ENOMEM; + + resp = kzalloc(sizeof(*resp), GFP_KERNEL); + if (!resp) { + ret = -ENOMEM; + goto resp_out; + } + + req->client_id_valid = 1; + req->client_id = QMI_WLANFW_CLIENT_ID; + req->fw_ready_enable_valid = 1; + req->fw_ready_enable = 1; + req->request_mem_enable_valid = 1; + req->request_mem_enable = 1; + req->fw_mem_ready_enable_valid = 1; + req->fw_mem_ready_enable = 1; + req->cal_done_enable_valid = 1; + req->cal_done_enable = 1; + req->fw_init_done_enable_valid = 1; + req->fw_init_done_enable = 1; + + req->pin_connect_result_enable_valid = 0; + req->pin_connect_result_enable = 0; + + ret = qmi_txn_init(handle, &txn, + 
qmi_wlanfw_ind_register_resp_msg_v01_ei, resp); + if (ret < 0) + goto out; + + ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, + QMI_WLANFW_IND_REGISTER_REQ_V01, + QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN, + qmi_wlanfw_ind_register_req_msg_v01_ei, req); + if (ret < 0) { + ath12k_warn(ab, "Failed to send indication register request, err = %d\n", + ret); + goto out; + } + + ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS)); + if (ret < 0) { + ath12k_warn(ab, "failed to register fw indication %d\n", ret); + goto out; + } + + if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { + ath12k_warn(ab, "FW Ind register request failed, result: %d, err: %d\n", + resp->resp.result, resp->resp.error); + ret = -EINVAL; + goto out; + } + +out: + kfree(resp); +resp_out: + kfree(req); + return ret; +} + +static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab) +{ + struct qmi_wlanfw_respond_mem_req_msg_v01 *req; + struct qmi_wlanfw_respond_mem_resp_msg_v01 resp; + struct qmi_txn txn = {}; + int ret = 0, i; + bool delayed; + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return -ENOMEM; + + memset(&resp, 0, sizeof(resp)); + + /* Some targets by default request one big block of contiguous + * DMA memory, which is hard to allocate from the kernel. In that + * case the host returns failure to the firmware and the firmware + * then requests multiple blocks of small chunk size memory. + */ + if (ab->qmi.target_mem_delayed) { + delayed = true; + ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi delays mem_request %d\n", + ab->qmi.mem_seg_count); + memset(req, 0, sizeof(*req)); + } else { + delayed = false; + req->mem_seg_len = ab->qmi.mem_seg_count; + for (i = 0; i < req->mem_seg_len; i++) { + req->mem_seg[i].addr = ab->qmi.target_mem[i].paddr; + req->mem_seg[i].size = ab->qmi.target_mem[i].size; + req->mem_seg[i].type = ab->qmi.target_mem[i].type; + ath12k_dbg(ab, ATH12K_DBG_QMI, + "qmi req mem_seg[%d] %pad %u %u\n", i, + &ab->qmi.target_mem[i].paddr, + ab->qmi.target_mem[i].size, + ab->qmi.target_mem[i].type); + } + } + + ret = qmi_txn_init(&ab->qmi.handle, &txn, + qmi_wlanfw_respond_mem_resp_msg_v01_ei, &resp); + if (ret < 0) + goto out; + + ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, + QMI_WLANFW_RESPOND_MEM_REQ_V01, + QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN, + qmi_wlanfw_respond_mem_req_msg_v01_ei, req); + if (ret < 0) { + ath12k_warn(ab, "qmi failed to respond memory request, err = %d\n", + ret); + goto out; + } + + ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS)); + if (ret < 0) { + ath12k_warn(ab, "qmi failed memory request, err = %d\n", ret); + goto out; + } + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + /* the error response is expected when + * target_mem_delayed is true. 
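+ * The firmware then retries with a request for multiple smaller chunks.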
+ */ + if (delayed && resp.resp.error == 0) + goto out; + + ath12k_warn(ab, "Respond mem req failed, result: %d, err: %d\n", + resp.resp.result, resp.resp.error); + ret = -EINVAL; + goto out; + } +out: + kfree(req); + return ret; +} + +static void ath12k_qmi_free_target_mem_chunk(struct ath12k_base *ab) +{ + int i; + + for (i = 0; i < ab->qmi.mem_seg_count; i++) { + if (!ab->qmi.target_mem[i].v.addr) + continue; + dma_free_coherent(ab->dev, + ab->qmi.target_mem[i].size, + ab->qmi.target_mem[i].v.addr, + ab->qmi.target_mem[i].paddr); + ab->qmi.target_mem[i].v.addr = NULL; + } +} + +static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab) +{ + int i; + struct target_mem_chunk *chunk; + + ab->qmi.target_mem_delayed = false; + + for (i = 0; i < ab->qmi.mem_seg_count; i++) { + chunk = &ab->qmi.target_mem[i]; + + /* Allocate memory for a region only if its functionality is + * supported on the host. For an unsupported memory region the + * host does not allocate memory and assigns NULL; FW will + * handle this without crashing. + */ + switch (chunk->type) { + case HOST_DDR_REGION_TYPE: + case M3_DUMP_REGION_TYPE: + case PAGEABLE_MEM_REGION_TYPE: + case CALDB_MEM_REGION_TYPE: + chunk->v.addr = dma_alloc_coherent(ab->dev, + chunk->size, + &chunk->paddr, + GFP_KERNEL | __GFP_NOWARN); + if (!chunk->v.addr) { + if (chunk->size > ATH12K_QMI_MAX_CHUNK_SIZE) { + ab->qmi.target_mem_delayed = true; + ath12k_warn(ab, + "qmi dma allocation failed (%d B type %u), will try later with smaller size\n", + chunk->size, + chunk->type); + ath12k_qmi_free_target_mem_chunk(ab); + return 0; + } + ath12k_warn(ab, "memory allocation failure for %u size: %d\n", + chunk->type, chunk->size); + return -ENOMEM; + } + break; + default: + ath12k_warn(ab, "memory type %u not supported\n", + chunk->type); + chunk->paddr = 0; + chunk->v.addr = NULL; + break; + } + } + return 0; +} + +static int ath12k_qmi_request_target_cap(struct ath12k_base *ab) +{ + struct qmi_wlanfw_cap_req_msg_v01 req; + struct qmi_wlanfw_cap_resp_msg_v01 resp; + struct qmi_txn txn = {}; + unsigned int board_id = ATH12K_BOARD_ID_DEFAULT; + int ret = 0; + int i; + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + + ret = qmi_txn_init(&ab->qmi.handle, &txn, + qmi_wlanfw_cap_resp_msg_v01_ei, &resp); + if (ret < 0) + goto out; + + ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, + QMI_WLANFW_CAP_REQ_V01, + QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN, + qmi_wlanfw_cap_req_msg_v01_ei, &req); + if (ret < 0) { + ath12k_warn(ab, "qmi failed to send target cap request, err = %d\n", + ret); + goto out; + } + + ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS)); + if (ret < 0) { + ath12k_warn(ab, "qmi failed target cap request %d\n", ret); + goto out; + } + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + ath12k_warn(ab, "qmi targetcap req failed, result: %d, err: %d\n", + resp.resp.result, resp.resp.error); + ret = -EINVAL; + goto out; + } + + if (resp.chip_info_valid) { + ab->qmi.target.chip_id = resp.chip_info.chip_id; + ab->qmi.target.chip_family = resp.chip_info.chip_family; + } + + if (resp.board_info_valid) + ab->qmi.target.board_id = resp.board_info.board_id; + else + ab->qmi.target.board_id = board_id; + + if (resp.soc_info_valid) + ab->qmi.target.soc_id = resp.soc_info.soc_id; + + if (resp.fw_version_info_valid) { + ab->qmi.target.fw_version = resp.fw_version_info.fw_version; + strscpy(ab->qmi.target.fw_build_timestamp, + resp.fw_version_info.fw_build_timestamp, + sizeof(ab->qmi.target.fw_build_timestamp)); + } + + if 
(resp.fw_build_id_valid) + strscpy(ab->qmi.target.fw_build_id, resp.fw_build_id, + sizeof(ab->qmi.target.fw_build_id)); + + if (resp.dev_mem_info_valid) { + for (i = 0; i < ATH12K_QMI_WLFW_MAX_DEV_MEM_NUM_V01; i++) { + ab->qmi.dev_mem[i].start = + resp.dev_mem[i].start; + ab->qmi.dev_mem[i].size = + resp.dev_mem[i].size; + ath12k_dbg(ab, ATH12K_DBG_QMI, + "devmem [%d] start 0x%llx size %llu\n", i, + ab->qmi.dev_mem[i].start, + ab->qmi.dev_mem[i].size); + } + } + + if (resp.eeprom_caldata_read_timeout_valid) { + ab->qmi.target.eeprom_caldata = resp.eeprom_caldata_read_timeout; + ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi cal data supported from eeprom\n"); + } + + ath12k_info(ab, "chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x\n", + ab->qmi.target.chip_id, ab->qmi.target.chip_family, + ab->qmi.target.board_id, ab->qmi.target.soc_id); + + ath12k_info(ab, "fw_version 0x%x fw_build_timestamp %s fw_build_id %s\n", + ab->qmi.target.fw_version, + ab->qmi.target.fw_build_timestamp, + ab->qmi.target.fw_build_id); + +out: + return ret; +} + +static int ath12k_qmi_load_file_target_mem(struct ath12k_base *ab, + const u8 *data, u32 len, u8 type) +{ + struct qmi_wlanfw_bdf_download_req_msg_v01 *req; + struct qmi_wlanfw_bdf_download_resp_msg_v01 resp; + struct qmi_txn txn = {}; + const u8 *temp = data; + int ret; + u32 remaining = len; + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return -ENOMEM; + memset(&resp, 0, sizeof(resp)); + + while (remaining) { + req->valid = 1; + req->file_id_valid = 1; + req->file_id = ab->qmi.target.board_id; + req->total_size_valid = 1; + req->total_size = remaining; + req->seg_id_valid = 1; + req->data_valid = 1; + req->bdf_type = type; + req->bdf_type_valid = 1; + req->end_valid = 1; + req->end = 0; + + if (remaining > QMI_WLANFW_MAX_DATA_SIZE_V01) { + req->data_len = QMI_WLANFW_MAX_DATA_SIZE_V01; + } else { + req->data_len = remaining; + req->end = 1; + } + + if (type == ATH12K_QMI_FILE_TYPE_EEPROM) { + req->data_valid = 0; + req->end = 1; + req->data_len = ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE; + } else { + memcpy(req->data, temp, req->data_len); + } + + ret = qmi_txn_init(&ab->qmi.handle, &txn, + qmi_wlanfw_bdf_download_resp_msg_v01_ei, + &resp); + if (ret < 0) + goto out; + + ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi bdf download req fixed addr type %d\n", + type); + + ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, + QMI_WLANFW_BDF_DOWNLOAD_REQ_V01, + QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN, + qmi_wlanfw_bdf_download_req_msg_v01_ei, req); + if (ret < 0) { + qmi_txn_cancel(&txn); + goto out; + } + + ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS)); + if (ret < 0) + goto out; + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + ath12k_warn(ab, "qmi BDF download failed, result: %d, err: %d\n", + resp.resp.result, resp.resp.error); + ret = -EINVAL; + goto out; + } + + if (type == ATH12K_QMI_FILE_TYPE_EEPROM) { + remaining = 0; + } else { + remaining -= req->data_len; + temp += req->data_len; + req->seg_id++; + ath12k_dbg(ab, ATH12K_DBG_QMI, + "qmi bdf download request remaining %i\n", + remaining); + } + } + +out: + kfree(req); + return ret; +} + +static int ath12k_qmi_load_bdf_qmi(struct ath12k_base *ab, + enum ath12k_qmi_bdf_type type) +{ + struct device *dev = ab->dev; + char filename[ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE]; + const struct firmware *fw_entry; + struct ath12k_board_data bd; + u32 fw_size, file_type; + int ret = 0; + const u8 *tmp; + + memset(&bd, 0, sizeof(bd)); + + switch (type) { + case ATH12K_QMI_BDF_TYPE_ELF: + ret = 
ath12k_core_fetch_bdf(ab, &bd); + if (ret) { + ath12k_warn(ab, "qmi failed to load bdf:%d\n", ret); + goto out; + } + + if (bd.len >= SELFMAG && memcmp(bd.data, ELFMAG, SELFMAG) == 0) + type = ATH12K_QMI_BDF_TYPE_ELF; + else + type = ATH12K_QMI_BDF_TYPE_BIN; + + break; + case ATH12K_QMI_BDF_TYPE_REGDB: + ret = ath12k_core_fetch_board_data_api_1(ab, &bd, + ATH12K_REGDB_FILE_NAME); + if (ret) { + ath12k_warn(ab, "qmi failed to load regdb bin:%d\n", ret); + goto out; + } + break; + case ATH12K_QMI_BDF_TYPE_CALIBRATION: + + if (ab->qmi.target.eeprom_caldata) { + file_type = ATH12K_QMI_FILE_TYPE_EEPROM; + tmp = filename; + fw_size = ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE; + } else { + file_type = ATH12K_QMI_FILE_TYPE_CALDATA; + + /* cal-<bus>-<id>.bin */ + snprintf(filename, sizeof(filename), "cal-%s-%s.bin", + ath12k_bus_str(ab->hif.bus), dev_name(dev)); + fw_entry = ath12k_core_firmware_request(ab, filename); + if (!IS_ERR(fw_entry)) + goto success; + + fw_entry = ath12k_core_firmware_request(ab, + ATH12K_DEFAULT_CAL_FILE); + if (IS_ERR(fw_entry)) { + ret = PTR_ERR(fw_entry); + ath12k_warn(ab, + "qmi failed to load CAL data file:%s\n", + filename); + goto out; + } + +success: + fw_size = min_t(u32, ab->hw_params->fw.board_size, + fw_entry->size); + tmp = fw_entry->data; + } + ret = ath12k_qmi_load_file_target_mem(ab, tmp, fw_size, file_type); + if (ret < 0) { + ath12k_warn(ab, "qmi failed to load caldata\n"); + goto out_qmi_cal; + } + + ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi caldata downloaded: type: %u\n", + file_type); + +out_qmi_cal: + if (!ab->qmi.target.eeprom_caldata) + release_firmware(fw_entry); + return ret; + default: + ath12k_warn(ab, "unknown file type for load %d\n", type); + goto out; + } + + ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi bdf_type %d\n", type); + + fw_size = min_t(u32, ab->hw_params->fw.board_size, bd.len); + + ret = ath12k_qmi_load_file_target_mem(ab, bd.data, fw_size, type); + if (ret < 0) + ath12k_warn(ab, "qmi failed to load bdf file\n"); + +out: + ath12k_core_free_bdf(ab, &bd); + ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi BDF download sequence completed\n"); + + return ret; +} + +static int ath12k_qmi_m3_load(struct ath12k_base *ab) +{ + struct m3_mem_region *m3_mem = &ab->qmi.m3_mem; + const struct firmware *fw; + char path[100]; + int ret; + + if (m3_mem->vaddr || m3_mem->size) + return 0; + + fw = ath12k_core_firmware_request(ab, ATH12K_M3_FILE); + if (IS_ERR(fw)) { + ret = PTR_ERR(fw); + ath12k_core_create_firmware_path(ab, ATH12K_M3_FILE, + path, sizeof(path)); + ath12k_err(ab, "failed to load %s: %d\n", path, ret); + return ret; + } + + m3_mem->vaddr = dma_alloc_coherent(ab->dev, + fw->size, &m3_mem->paddr, + GFP_KERNEL); + if (!m3_mem->vaddr) { + ath12k_err(ab, "failed to allocate memory for M3 with size %zu\n", + fw->size); + release_firmware(fw); + return -ENOMEM; + } + + memcpy(m3_mem->vaddr, fw->data, fw->size); + m3_mem->size = fw->size; + release_firmware(fw); + + return 0; +} + +static void ath12k_qmi_m3_free(struct ath12k_base *ab) +{ + struct m3_mem_region *m3_mem = &ab->qmi.m3_mem; + + if (!m3_mem->vaddr) + return; + + dma_free_coherent(ab->dev, m3_mem->size, + m3_mem->vaddr, m3_mem->paddr); + m3_mem->vaddr = NULL; +} + +static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab) +{ + struct m3_mem_region *m3_mem = &ab->qmi.m3_mem; + struct qmi_wlanfw_m3_info_req_msg_v01 req; + struct qmi_wlanfw_m3_info_resp_msg_v01 resp; + struct qmi_txn txn = {}; + int ret = 0; + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + + ret = ath12k_qmi_m3_load(ab); + if (ret) { 
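+ /* no point in sending the M3 info request without the M3 image */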
+ ath12k_err(ab, "failed to load m3 firmware: %d\n", ret); + return ret; + } + + req.addr = m3_mem->paddr; + req.size = m3_mem->size; + + ret = qmi_txn_init(&ab->qmi.handle, &txn, + qmi_wlanfw_m3_info_resp_msg_v01_ei, &resp); + if (ret < 0) + goto out; + + ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, + QMI_WLANFW_M3_INFO_REQ_V01, + QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN, + qmi_wlanfw_m3_info_req_msg_v01_ei, &req); + if (ret < 0) { + ath12k_warn(ab, "qmi failed to send M3 information request, err = %d\n", + ret); + goto out; + } + + ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS)); + if (ret < 0) { + ath12k_warn(ab, "qmi failed M3 information request %d\n", ret); + goto out; + } + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + ath12k_warn(ab, "qmi M3 info request failed, result: %d, err: %d\n", + resp.resp.result, resp.resp.error); + ret = -EINVAL; + goto out; + } +out: + return ret; +} + +static int ath12k_qmi_wlanfw_mode_send(struct ath12k_base *ab, + u32 mode) +{ + struct qmi_wlanfw_wlan_mode_req_msg_v01 req; + struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp; + struct qmi_txn txn = {}; + int ret = 0; + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + + req.mode = mode; + req.hw_debug_valid = 1; + req.hw_debug = 0; + + ret = qmi_txn_init(&ab->qmi.handle, &txn, + qmi_wlanfw_wlan_mode_resp_msg_v01_ei, &resp); + if (ret < 0) + goto out; + + ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, + QMI_WLANFW_WLAN_MODE_REQ_V01, + QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN, + qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req); + if (ret < 0) { + ath12k_warn(ab, "qmi failed to send mode request, mode: %d, err = %d\n", + mode, ret); + goto out; + } + + ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS)); + if (ret < 0) { + if (mode == ATH12K_FIRMWARE_MODE_OFF && ret == -ENETRESET) { + ath12k_warn(ab, "WLFW service is disconnected\n"); + return 0; + } + ath12k_warn(ab, "qmi failed to set mode request, mode: %d, err = %d\n", + mode, ret); + goto out; + } + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + ath12k_warn(ab, "Mode request failed, mode: %d, result: %d err: %d\n", + mode, resp.resp.result, resp.resp.error); + ret = -EINVAL; + goto out; + } + +out: + return ret; +} + +static int ath12k_qmi_wlanfw_wlan_cfg_send(struct ath12k_base *ab) +{ + struct qmi_wlanfw_wlan_cfg_req_msg_v01 *req; + struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp; + struct ce_pipe_config *ce_cfg; + struct service_to_pipe *svc_cfg; + struct qmi_txn txn = {}; + int ret = 0, pipe_num; + + ce_cfg = (struct ce_pipe_config *)ab->qmi.ce_cfg.tgt_ce; + svc_cfg = (struct service_to_pipe *)ab->qmi.ce_cfg.svc_to_ce_map; + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return -ENOMEM; + + memset(&resp, 0, sizeof(resp)); + + req->host_version_valid = 1; + strscpy(req->host_version, ATH12K_HOST_VERSION_STRING, + sizeof(req->host_version)); + + req->tgt_cfg_valid = 1; + /* This is the number of CE configs */ + req->tgt_cfg_len = ab->qmi.ce_cfg.tgt_ce_len; + for (pipe_num = 0; pipe_num < req->tgt_cfg_len; pipe_num++) { + req->tgt_cfg[pipe_num].pipe_num = ce_cfg[pipe_num].pipenum; + req->tgt_cfg[pipe_num].pipe_dir = ce_cfg[pipe_num].pipedir; + req->tgt_cfg[pipe_num].nentries = ce_cfg[pipe_num].nentries; + req->tgt_cfg[pipe_num].nbytes_max = ce_cfg[pipe_num].nbytes_max; + req->tgt_cfg[pipe_num].flags = ce_cfg[pipe_num].flags; + } + + req->svc_cfg_valid = 1; + /* This is the number of Service/CE configs */ + req->svc_cfg_len = ab->qmi.ce_cfg.svc_to_ce_map_len; + for 
(pipe_num = 0; pipe_num < req->svc_cfg_len; pipe_num++) { + req->svc_cfg[pipe_num].service_id = svc_cfg[pipe_num].service_id; + req->svc_cfg[pipe_num].pipe_dir = svc_cfg[pipe_num].pipedir; + req->svc_cfg[pipe_num].pipe_num = svc_cfg[pipe_num].pipenum; + } + + /* set shadow v3 configuration */ + if (ab->hw_params->supports_shadow_regs) { + req->shadow_reg_v3_valid = 1; + req->shadow_reg_v3_len = min_t(u32, + ab->qmi.ce_cfg.shadow_reg_v3_len, + QMI_WLANFW_MAX_NUM_SHADOW_REG_V3_V01); + memcpy(&req->shadow_reg_v3, ab->qmi.ce_cfg.shadow_reg_v3, + sizeof(u32) * req->shadow_reg_v3_len); + } else { + req->shadow_reg_v3_valid = 0; + } + + ret = qmi_txn_init(&ab->qmi.handle, &txn, + qmi_wlanfw_wlan_cfg_resp_msg_v01_ei, &resp); + if (ret < 0) + goto out; + + ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, + QMI_WLANFW_WLAN_CFG_REQ_V01, + QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN, + qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req); + if (ret < 0) { + ath12k_warn(ab, "qmi failed to send wlan config request, err = %d\n", + ret); + goto out; + } + + ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS)); + if (ret < 0) { + ath12k_warn(ab, "qmi failed wlan config request, err = %d\n", ret); + goto out; + } + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + ath12k_warn(ab, "qmi wlan config request failed, result: %d, err: %d\n", + resp.resp.result, resp.resp.error); + ret = -EINVAL; + goto out; + } + +out: + kfree(req); + return ret; +} + +void ath12k_qmi_firmware_stop(struct ath12k_base *ab) +{ + int ret; + + ret = ath12k_qmi_wlanfw_mode_send(ab, ATH12K_FIRMWARE_MODE_OFF); + if (ret < 0) { + ath12k_warn(ab, "qmi failed to send wlan mode off\n"); + return; + } +} + +int ath12k_qmi_firmware_start(struct ath12k_base *ab, + u32 mode) +{ + int ret; + + ret = ath12k_qmi_wlanfw_wlan_cfg_send(ab); + if (ret < 0) { + ath12k_warn(ab, "qmi failed to send wlan cfg:%d\n", ret); + return ret; + } + + ret = ath12k_qmi_wlanfw_mode_send(ab, mode); + if (ret < 0) { + ath12k_warn(ab, "qmi failed to send wlan fw mode:%d\n", ret); + return ret; + } + + return 0; +} + +static int +ath12k_qmi_driver_event_post(struct ath12k_qmi *qmi, + enum ath12k_qmi_event_type type, + void *data) +{ + struct ath12k_qmi_driver_event *event; + + event = kzalloc(sizeof(*event), GFP_ATOMIC); + if (!event) + return -ENOMEM; + + event->type = type; + event->data = data; + + spin_lock(&qmi->event_lock); + list_add_tail(&event->list, &qmi->event_list); + spin_unlock(&qmi->event_lock); + + queue_work(qmi->event_wq, &qmi->event_work); + + return 0; +} + +static int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi) +{ + struct ath12k_base *ab = qmi->ab; + int ret; + + ret = ath12k_qmi_fw_ind_register_send(ab); + if (ret < 0) { + ath12k_warn(ab, "qmi failed to send FW indication QMI:%d\n", ret); + return ret; + } + + ret = ath12k_qmi_host_cap_send(ab); + if (ret < 0) { + ath12k_warn(ab, "qmi failed to send host cap QMI:%d\n", ret); + return ret; + } + + return ret; +} + +static int ath12k_qmi_event_mem_request(struct ath12k_qmi *qmi) +{ + struct ath12k_base *ab = qmi->ab; + int ret; + + ret = ath12k_qmi_respond_fw_mem_request(ab); + if (ret < 0) { + ath12k_warn(ab, "qmi failed to respond fw mem req:%d\n", ret); + return ret; + } + + return ret; +} + +static int ath12k_qmi_event_load_bdf(struct ath12k_qmi *qmi) +{ + struct ath12k_base *ab = qmi->ab; + int ret; + + ret = ath12k_qmi_request_target_cap(ab); + if (ret < 0) { + ath12k_warn(ab, "qmi failed to req target capabilities:%d\n", ret); + return ret; + } + + ret = 
ath12k_qmi_load_bdf_qmi(ab, ATH12K_QMI_BDF_TYPE_REGDB); + if (ret < 0) { + ath12k_warn(ab, "qmi failed to load regdb file:%d\n", ret); + return ret; + } + + ret = ath12k_qmi_load_bdf_qmi(ab, ATH12K_QMI_BDF_TYPE_ELF); + if (ret < 0) { + ath12k_warn(ab, "qmi failed to load board data file:%d\n", ret); + return ret; + } + + if (ab->hw_params->download_calib) { + ret = ath12k_qmi_load_bdf_qmi(ab, ATH12K_QMI_BDF_TYPE_CALIBRATION); + if (ret < 0) + ath12k_warn(ab, "qmi failed to load calibrated data:%d\n", ret); + } + + ret = ath12k_qmi_wlanfw_m3_info_send(ab); + if (ret < 0) { + ath12k_warn(ab, "qmi failed to send m3 info req:%d\n", ret); + return ret; + } + + return ret; +} + +static void ath12k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *data) +{ + struct ath12k_qmi *qmi = container_of(qmi_hdl, struct ath12k_qmi, handle); + struct ath12k_base *ab = qmi->ab; + const struct qmi_wlanfw_request_mem_ind_msg_v01 *msg = data; + int i, ret; + + ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi firmware request memory request\n"); + + if (msg->mem_seg_len == 0 || + msg->mem_seg_len > ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01) { + ath12k_warn(ab, "Invalid memory segment length: %u\n", + msg->mem_seg_len); + return; + } + + ab->qmi.mem_seg_count = msg->mem_seg_len; + + for (i = 0; i < qmi->mem_seg_count; i++) { + ab->qmi.target_mem[i].type = msg->mem_seg[i].type; + ab->qmi.target_mem[i].size = msg->mem_seg[i].size; + ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi mem seg type %d size %d\n", + msg->mem_seg[i].type, msg->mem_seg[i].size); + } + + ret = ath12k_qmi_alloc_target_mem_chunk(ab); + if (ret) { + ath12k_warn(ab, "qmi failed to alloc target memory: %d\n", + ret); + return; + } + + ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_REQUEST_MEM, NULL); +} + +static void ath12k_qmi_msg_mem_ready_cb(struct qmi_handle *qmi_hdl, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *decoded) +{ + struct ath12k_qmi *qmi = container_of(qmi_hdl, struct ath12k_qmi, handle); + struct ath12k_base *ab = qmi->ab; + + ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi firmware memory ready indication\n"); + ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_FW_MEM_READY, NULL); +} + +static void ath12k_qmi_msg_fw_ready_cb(struct qmi_handle *qmi_hdl, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *decoded) +{ + struct ath12k_qmi *qmi = container_of(qmi_hdl, struct ath12k_qmi, handle); + struct ath12k_base *ab = qmi->ab; + + ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi firmware ready\n"); + ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_FW_READY, NULL); +} + +static const struct qmi_msg_handler ath12k_qmi_msg_handlers[] = { + { + .type = QMI_INDICATION, + .msg_id = QMI_WLFW_REQUEST_MEM_IND_V01, + .ei = qmi_wlanfw_request_mem_ind_msg_v01_ei, + .decoded_size = sizeof(struct qmi_wlanfw_request_mem_ind_msg_v01), + .fn = ath12k_qmi_msg_mem_request_cb, + }, + { + .type = QMI_INDICATION, + .msg_id = QMI_WLFW_FW_MEM_READY_IND_V01, + .ei = qmi_wlanfw_mem_ready_ind_msg_v01_ei, + .decoded_size = sizeof(struct qmi_wlanfw_fw_mem_ready_ind_msg_v01), + .fn = ath12k_qmi_msg_mem_ready_cb, + }, + { + .type = QMI_INDICATION, + .msg_id = QMI_WLFW_FW_READY_IND_V01, + .ei = qmi_wlanfw_fw_ready_ind_msg_v01_ei, + .decoded_size = sizeof(struct qmi_wlanfw_fw_ready_ind_msg_v01), + .fn = ath12k_qmi_msg_fw_ready_cb, + }, +}; + +static int ath12k_qmi_ops_new_server(struct qmi_handle *qmi_hdl, + struct qmi_service *service) +{ + struct ath12k_qmi *qmi = container_of(qmi_hdl, struct ath12k_qmi, handle); + 
struct ath12k_base *ab = qmi->ab; + struct sockaddr_qrtr *sq = &qmi->sq; + int ret; + + sq->sq_family = AF_QIPCRTR; + sq->sq_node = service->node; + sq->sq_port = service->port; + + ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)sq, + sizeof(*sq), 0); + if (ret) { + ath12k_warn(ab, "qmi failed to connect to remote service %d\n", ret); + return ret; + } + + ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi wifi fw qmi service connected\n"); + ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_SERVER_ARRIVE, NULL); + + return ret; +} + +static void ath12k_qmi_ops_del_server(struct qmi_handle *qmi_hdl, + struct qmi_service *service) +{ + struct ath12k_qmi *qmi = container_of(qmi_hdl, struct ath12k_qmi, handle); + struct ath12k_base *ab = qmi->ab; + + ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi wifi fw del server\n"); + ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_SERVER_EXIT, NULL); +} + +static const struct qmi_ops ath12k_qmi_ops = { + .new_server = ath12k_qmi_ops_new_server, + .del_server = ath12k_qmi_ops_del_server, +}; + +static void ath12k_qmi_driver_event_work(struct work_struct *work) +{ + struct ath12k_qmi *qmi = container_of(work, struct ath12k_qmi, + event_work); + struct ath12k_qmi_driver_event *event; + struct ath12k_base *ab = qmi->ab; + int ret; + + spin_lock(&qmi->event_lock); + while (!list_empty(&qmi->event_list)) { + event = list_first_entry(&qmi->event_list, + struct ath12k_qmi_driver_event, list); + list_del(&event->list); + spin_unlock(&qmi->event_lock); + + if (test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags)) { + kfree(event); + return; + } + + switch (event->type) { + case ATH12K_QMI_EVENT_SERVER_ARRIVE: + ret = ath12k_qmi_event_server_arrive(qmi); + if (ret < 0) + set_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags); + break; + case ATH12K_QMI_EVENT_SERVER_EXIT: + set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags); + set_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags); + break; + case ATH12K_QMI_EVENT_REQUEST_MEM: + ret = ath12k_qmi_event_mem_request(qmi); + if (ret < 0) + set_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags); + break; + case ATH12K_QMI_EVENT_FW_MEM_READY: + ret = ath12k_qmi_event_load_bdf(qmi); + if (ret < 0) + set_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags); + break; + case ATH12K_QMI_EVENT_FW_READY: + clear_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags); + if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) { + ath12k_hal_dump_srng_stats(ab); + queue_work(ab->workqueue, &ab->restart_work); + break; + } + + clear_bit(ATH12K_FLAG_CRASH_FLUSH, + &ab->dev_flags); + clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags); + ath12k_core_qmi_firmware_ready(ab); + set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags); + + break; + default: + ath12k_warn(ab, "invalid event type: %d\n", event->type); + break; + } + kfree(event); + spin_lock(&qmi->event_lock); + } + spin_unlock(&qmi->event_lock); +} + +int ath12k_qmi_init_service(struct ath12k_base *ab) +{ + int ret; + + memset(&ab->qmi.target, 0, sizeof(struct target_info)); + memset(&ab->qmi.target_mem, 0, sizeof(ab->qmi.target_mem)); + ab->qmi.ab = ab; + + ab->qmi.target_mem_mode = ATH12K_QMI_TARGET_MEM_MODE_DEFAULT; + ret = qmi_handle_init(&ab->qmi.handle, ATH12K_QMI_RESP_LEN_MAX, + &ath12k_qmi_ops, ath12k_qmi_msg_handlers); + if (ret < 0) { + ath12k_warn(ab, "failed to initialize qmi handle\n"); + return ret; + } + + ab->qmi.event_wq = alloc_workqueue("ath12k_qmi_driver_event", + WQ_UNBOUND, 1); + if (!ab->qmi.event_wq) { + ath12k_err(ab, "failed to allocate workqueue\n"); + return -ENOMEM; + } + + INIT_LIST_HEAD(&ab->qmi.event_list); + 
spin_lock_init(&ab->qmi.event_lock); + INIT_WORK(&ab->qmi.event_work, ath12k_qmi_driver_event_work); + + ret = qmi_add_lookup(&ab->qmi.handle, ATH12K_QMI_WLFW_SERVICE_ID_V01, + ATH12K_QMI_WLFW_SERVICE_VERS_V01, + ab->qmi.service_ins_id); + if (ret < 0) { + ath12k_warn(ab, "failed to add qmi lookup\n"); + destroy_workqueue(ab->qmi.event_wq); + return ret; + } + + return ret; +} + +void ath12k_qmi_deinit_service(struct ath12k_base *ab) +{ + qmi_handle_release(&ab->qmi.handle); + cancel_work_sync(&ab->qmi.event_work); + destroy_workqueue(ab->qmi.event_wq); + ath12k_qmi_m3_free(ab); + ath12k_qmi_free_target_mem_chunk(ab); +} diff --git a/drivers/net/wireless/ath/ath12k/qmi.h b/drivers/net/wireless/ath/ath12k/qmi.h new file mode 100644 index 000000000000..ad87f19903db --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/qmi.h @@ -0,0 +1,569 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef ATH12K_QMI_H +#define ATH12K_QMI_H + +#include <linux/mutex.h> +#include <linux/soc/qcom/qmi.h> + +#define ATH12K_HOST_VERSION_STRING "WIN" +#define ATH12K_QMI_WLANFW_TIMEOUT_MS 10000 +#define ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE 64 +#define ATH12K_QMI_CALDB_ADDRESS 0x4BA00000 +#define ATH12K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 128 +#define ATH12K_QMI_WLFW_NODE_ID_BASE 0x07 +#define ATH12K_QMI_WLFW_SERVICE_ID_V01 0x45 +#define ATH12K_QMI_WLFW_SERVICE_VERS_V01 0x01 +#define ATH12K_QMI_WLFW_SERVICE_INS_ID_V01 0x02 +#define ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_WCN7850 0x1 + +#define ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9274 0x07 +#define ATH12K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 32 +#define ATH12K_QMI_RESP_LEN_MAX 8192 +#define ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01 52 +#define ATH12K_QMI_CALDB_SIZE 0x480000 +#define ATH12K_QMI_BDF_EXT_STR_LENGTH 0x20 +#define ATH12K_QMI_FW_MEM_REQ_SEGMENT_CNT 3 +#define ATH12K_QMI_WLFW_MAX_DEV_MEM_NUM_V01 4 +#define ATH12K_QMI_DEVMEM_CMEM_INDEX 0 + +#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035 +#define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037 +#define QMI_WLFW_FW_READY_IND_V01 0x0038 + +#define QMI_WLANFW_MAX_DATA_SIZE_V01 6144 +#define ATH12K_FIRMWARE_MODE_OFF 4 +#define ATH12K_QMI_TARGET_MEM_MODE_DEFAULT 0 + +#define ATH12K_BOARD_ID_DEFAULT 0xFF + +struct ath12k_base; + +enum ath12k_qmi_file_type { + ATH12K_QMI_FILE_TYPE_BDF_GOLDEN = 0, + ATH12K_QMI_FILE_TYPE_CALDATA = 2, + ATH12K_QMI_FILE_TYPE_EEPROM = 3, + ATH12K_QMI_MAX_FILE_TYPE = 4, +}; + +enum ath12k_qmi_bdf_type { + ATH12K_QMI_BDF_TYPE_BIN = 0, + ATH12K_QMI_BDF_TYPE_ELF = 1, + ATH12K_QMI_BDF_TYPE_REGDB = 4, + ATH12K_QMI_BDF_TYPE_CALIBRATION = 5, +}; + +enum ath12k_qmi_event_type { + ATH12K_QMI_EVENT_SERVER_ARRIVE, + ATH12K_QMI_EVENT_SERVER_EXIT, + ATH12K_QMI_EVENT_REQUEST_MEM, + ATH12K_QMI_EVENT_FW_MEM_READY, + ATH12K_QMI_EVENT_FW_READY, + ATH12K_QMI_EVENT_REGISTER_DRIVER, + ATH12K_QMI_EVENT_UNREGISTER_DRIVER, + ATH12K_QMI_EVENT_RECOVERY, + ATH12K_QMI_EVENT_FORCE_FW_ASSERT, + ATH12K_QMI_EVENT_POWER_UP, + ATH12K_QMI_EVENT_POWER_DOWN, + ATH12K_QMI_EVENT_MAX, +}; + +struct ath12k_qmi_driver_event { + struct list_head list; + enum ath12k_qmi_event_type type; + void *data; +}; + +struct ath12k_qmi_ce_cfg { + const struct ce_pipe_config *tgt_ce; + int tgt_ce_len; + const struct service_to_pipe *svc_to_ce_map; + int svc_to_ce_map_len; + const u8 *shadow_reg; + int shadow_reg_len; + u32 *shadow_reg_v3; + int shadow_reg_v3_len; +}; + +struct ath12k_qmi_event_msg { + 
struct list_head list; + enum ath12k_qmi_event_type type; +}; + +struct target_mem_chunk { + u32 size; + u32 type; + dma_addr_t paddr; + union { + void __iomem *ioaddr; + void *addr; + } v; +}; + +struct target_info { + u32 chip_id; + u32 chip_family; + u32 board_id; + u32 soc_id; + u32 fw_version; + u32 eeprom_caldata; + char fw_build_timestamp[ATH12K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1]; + char fw_build_id[ATH12K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1]; + char bdf_ext[ATH12K_QMI_BDF_EXT_STR_LENGTH]; +}; + +struct m3_mem_region { + u32 size; + dma_addr_t paddr; + void *vaddr; +}; + +struct dev_mem_info { + u64 start; + u64 size; +}; + +struct ath12k_qmi { + struct ath12k_base *ab; + struct qmi_handle handle; + struct sockaddr_qrtr sq; + struct work_struct event_work; + struct workqueue_struct *event_wq; + struct list_head event_list; + spinlock_t event_lock; /* spinlock for qmi event list */ + struct ath12k_qmi_ce_cfg ce_cfg; + struct target_mem_chunk target_mem[ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01]; + u32 mem_seg_count; + u32 target_mem_mode; + bool target_mem_delayed; + u8 cal_done; + struct target_info target; + struct m3_mem_region m3_mem; + unsigned int service_ins_id; + struct dev_mem_info dev_mem[ATH12K_QMI_WLFW_MAX_DEV_MEM_NUM_V01]; +}; + +#define QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN 261 +#define QMI_WLANFW_HOST_CAP_REQ_V01 0x0034 +#define QMI_WLANFW_HOST_CAP_RESP_MSG_V01_MAX_LEN 7 +#define QMI_WLFW_HOST_CAP_RESP_V01 0x0034 +#define QMI_WLFW_MAX_NUM_GPIO_V01 32 +#define QMI_WLANFW_MAX_PLATFORM_NAME_LEN_V01 64 +#define QMI_WLANFW_MAX_HOST_DDR_RANGE_SIZE_V01 3 + +struct qmi_wlanfw_host_ddr_range { + u64 start; + u64 size; +}; + +enum ath12k_qmi_target_mem { + HOST_DDR_REGION_TYPE = 0x1, + BDF_MEM_REGION_TYPE = 0x2, + M3_DUMP_REGION_TYPE = 0x3, + CALDB_MEM_REGION_TYPE = 0x4, + PAGEABLE_MEM_REGION_TYPE = 0x9, +}; + +enum qmi_wlanfw_host_build_type { + WLANFW_HOST_BUILD_TYPE_ENUM_MIN_VAL_V01 = INT_MIN, + QMI_WLANFW_HOST_BUILD_TYPE_UNSPECIFIED_V01 = 0, + QMI_WLANFW_HOST_BUILD_TYPE_PRIMARY_V01 = 1, + QMI_WLANFW_HOST_BUILD_TYPE_SECONDARY_V01 = 2, + WLANFW_HOST_BUILD_TYPE_ENUM_MAX_VAL_V01 = INT_MAX, +}; + +#define QMI_WLFW_MAX_NUM_MLO_CHIPS_V01 3 +#define QMI_WLFW_MAX_NUM_MLO_LINKS_PER_CHIP_V01 2 + +struct wlfw_host_mlo_chip_info_s_v01 { + u8 chip_id; + u8 num_local_links; + u8 hw_link_id[QMI_WLFW_MAX_NUM_MLO_LINKS_PER_CHIP_V01]; + u8 valid_mlo_link_id[QMI_WLFW_MAX_NUM_MLO_LINKS_PER_CHIP_V01]; +}; + +enum ath12k_qmi_cnss_feature { + CNSS_FEATURE_MIN_ENUM_VAL_V01 = INT_MIN, + CNSS_QDSS_CFG_MISS_V01 = 3, + CNSS_MAX_FEATURE_V01 = 64, + CNSS_FEATURE_MAX_ENUM_VAL_V01 = INT_MAX, +}; + +struct qmi_wlanfw_host_cap_req_msg_v01 { + u8 num_clients_valid; + u32 num_clients; + u8 wake_msi_valid; + u32 wake_msi; + u8 gpios_valid; + u32 gpios_len; + u32 gpios[QMI_WLFW_MAX_NUM_GPIO_V01]; + u8 nm_modem_valid; + u8 nm_modem; + u8 bdf_support_valid; + u8 bdf_support; + u8 bdf_cache_support_valid; + u8 bdf_cache_support; + u8 m3_support_valid; + u8 m3_support; + u8 m3_cache_support_valid; + u8 m3_cache_support; + u8 cal_filesys_support_valid; + u8 cal_filesys_support; + u8 cal_cache_support_valid; + u8 cal_cache_support; + u8 cal_done_valid; + u8 cal_done; + u8 mem_bucket_valid; + u32 mem_bucket; + u8 mem_cfg_mode_valid; + u8 mem_cfg_mode; + u8 cal_duration_valid; + u16 cal_duraiton; + u8 platform_name_valid; + char platform_name[QMI_WLANFW_MAX_PLATFORM_NAME_LEN_V01 + 1]; + u8 ddr_range_valid; + struct qmi_wlanfw_host_ddr_range ddr_range[QMI_WLANFW_MAX_HOST_DDR_RANGE_SIZE_V01]; + u8 host_build_type_valid; + 
enum qmi_wlanfw_host_build_type host_build_type; + u8 mlo_capable_valid; + u8 mlo_capable; + u8 mlo_chip_id_valid; + u16 mlo_chip_id; + u8 mlo_group_id_valid; + u8 mlo_group_id; + u8 max_mlo_peer_valid; + u16 max_mlo_peer; + u8 mlo_num_chips_valid; + u8 mlo_num_chips; + u8 mlo_chip_info_valid; + struct wlfw_host_mlo_chip_info_s_v01 mlo_chip_info[QMI_WLFW_MAX_NUM_MLO_CHIPS_V01]; + u8 feature_list_valid; + u64 feature_list; + +}; + +struct qmi_wlanfw_host_cap_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN 54 +#define QMI_WLANFW_IND_REGISTER_REQ_V01 0x0020 +#define QMI_WLANFW_IND_REGISTER_RESP_MSG_V01_MAX_LEN 18 +#define QMI_WLANFW_IND_REGISTER_RESP_V01 0x0020 +#define QMI_WLANFW_CLIENT_ID 0x4b4e454c + +struct qmi_wlanfw_ind_register_req_msg_v01 { + u8 fw_ready_enable_valid; + u8 fw_ready_enable; + u8 initiate_cal_download_enable_valid; + u8 initiate_cal_download_enable; + u8 initiate_cal_update_enable_valid; + u8 initiate_cal_update_enable; + u8 msa_ready_enable_valid; + u8 msa_ready_enable; + u8 pin_connect_result_enable_valid; + u8 pin_connect_result_enable; + u8 client_id_valid; + u32 client_id; + u8 request_mem_enable_valid; + u8 request_mem_enable; + u8 fw_mem_ready_enable_valid; + u8 fw_mem_ready_enable; + u8 fw_init_done_enable_valid; + u8 fw_init_done_enable; + u8 rejuvenate_enable_valid; + u32 rejuvenate_enable; + u8 xo_cal_enable_valid; + u8 xo_cal_enable; + u8 cal_done_enable_valid; + u8 cal_done_enable; +}; + +struct qmi_wlanfw_ind_register_resp_msg_v01 { + struct qmi_response_type_v01 resp; + u8 fw_status_valid; + u64 fw_status; +}; + +#define QMI_WLANFW_REQUEST_MEM_IND_MSG_V01_MAX_LEN 1824 +#define QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN 888 +#define QMI_WLANFW_RESPOND_MEM_RESP_MSG_V01_MAX_LEN 7 +#define QMI_WLANFW_REQUEST_MEM_IND_V01 0x0035 +#define QMI_WLANFW_RESPOND_MEM_REQ_V01 0x0036 +#define QMI_WLANFW_RESPOND_MEM_RESP_V01 0x0036 +#define QMI_WLANFW_MAX_NUM_MEM_CFG_V01 2 +#define QMI_WLANFW_MAX_STR_LEN_V01 16 + +struct qmi_wlanfw_mem_cfg_s_v01 { + u64 offset; + u32 size; + u8 secure_flag; +}; + +enum qmi_wlanfw_mem_type_enum_v01 { + WLANFW_MEM_TYPE_ENUM_MIN_VAL_V01 = INT_MIN, + QMI_WLANFW_MEM_TYPE_MSA_V01 = 0, + QMI_WLANFW_MEM_TYPE_DDR_V01 = 1, + QMI_WLANFW_MEM_BDF_V01 = 2, + QMI_WLANFW_MEM_M3_V01 = 3, + QMI_WLANFW_MEM_CAL_V01 = 4, + QMI_WLANFW_MEM_DPD_V01 = 5, + WLANFW_MEM_TYPE_ENUM_MAX_VAL_V01 = INT_MAX, +}; + +struct qmi_wlanfw_mem_seg_s_v01 { + u32 size; + enum qmi_wlanfw_mem_type_enum_v01 type; + u32 mem_cfg_len; + struct qmi_wlanfw_mem_cfg_s_v01 mem_cfg[QMI_WLANFW_MAX_NUM_MEM_CFG_V01]; +}; + +struct qmi_wlanfw_request_mem_ind_msg_v01 { + u32 mem_seg_len; + struct qmi_wlanfw_mem_seg_s_v01 mem_seg[ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01]; +}; + +struct qmi_wlanfw_mem_seg_resp_s_v01 { + u64 addr; + u32 size; + enum qmi_wlanfw_mem_type_enum_v01 type; + u8 restore; +}; + +struct qmi_wlanfw_respond_mem_req_msg_v01 { + u32 mem_seg_len; + struct qmi_wlanfw_mem_seg_resp_s_v01 mem_seg[ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01]; +}; + +struct qmi_wlanfw_respond_mem_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +struct qmi_wlanfw_fw_mem_ready_ind_msg_v01 { + char placeholder; +}; + +struct qmi_wlanfw_fw_ready_ind_msg_v01 { + char placeholder; +}; + +#define QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN 0 +#define QMI_WLANFW_CAP_RESP_MSG_V01_MAX_LEN 207 +#define QMI_WLANFW_CAP_REQ_V01 0x0024 +#define QMI_WLANFW_CAP_RESP_V01 0x0024 + +enum qmi_wlanfw_pipedir_enum_v01 { + QMI_WLFW_PIPEDIR_NONE_V01 = 0, + 
QMI_WLFW_PIPEDIR_IN_V01 = 1, + QMI_WLFW_PIPEDIR_OUT_V01 = 2, + QMI_WLFW_PIPEDIR_INOUT_V01 = 3, +}; + +struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01 { + __le32 pipe_num; + __le32 pipe_dir; + __le32 nentries; + __le32 nbytes_max; + __le32 flags; +}; + +struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01 { + __le32 service_id; + __le32 pipe_dir; + __le32 pipe_num; +}; + +struct qmi_wlanfw_shadow_reg_cfg_s_v01 { + u16 id; + u16 offset; +}; + +struct qmi_wlanfw_shadow_reg_v3_cfg_s_v01 { + u32 addr; +}; + +struct qmi_wlanfw_memory_region_info_s_v01 { + u64 region_addr; + u32 size; + u8 secure_flag; +}; + +struct qmi_wlanfw_rf_chip_info_s_v01 { + u32 chip_id; + u32 chip_family; +}; + +struct qmi_wlanfw_rf_board_info_s_v01 { + u32 board_id; +}; + +struct qmi_wlanfw_soc_info_s_v01 { + u32 soc_id; +}; + +struct qmi_wlanfw_fw_version_info_s_v01 { + u32 fw_version; + char fw_build_timestamp[ATH12K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1]; +}; + +struct qmi_wlanfw_dev_mem_info_s_v01 { + u64 start; + u64 size; +}; + +enum qmi_wlanfw_cal_temp_id_enum_v01 { + QMI_WLANFW_CAL_TEMP_IDX_0_V01 = 0, + QMI_WLANFW_CAL_TEMP_IDX_1_V01 = 1, + QMI_WLANFW_CAL_TEMP_IDX_2_V01 = 2, + QMI_WLANFW_CAL_TEMP_IDX_3_V01 = 3, + QMI_WLANFW_CAL_TEMP_IDX_4_V01 = 4, + QMI_WLANFW_CAL_TEMP_ID_MAX_V01 = 0xFF, +}; + +enum qmi_wlanfw_rd_card_chain_cap_v01 { + WLFW_RD_CARD_CHAIN_CAP_MIN_VAL_V01 = INT_MIN, + WLFW_RD_CARD_CHAIN_CAP_UNSPECIFIED_V01 = 0, + WLFW_RD_CARD_CHAIN_CAP_1x1_V01 = 1, + WLFW_RD_CARD_CHAIN_CAP_2x2_V01 = 2, + WLFW_RD_CARD_CHAIN_CAP_MAX_VAL_V01 = INT_MAX, +}; + +struct qmi_wlanfw_cap_resp_msg_v01 { + struct qmi_response_type_v01 resp; + u8 chip_info_valid; + struct qmi_wlanfw_rf_chip_info_s_v01 chip_info; + u8 board_info_valid; + struct qmi_wlanfw_rf_board_info_s_v01 board_info; + u8 soc_info_valid; + struct qmi_wlanfw_soc_info_s_v01 soc_info; + u8 fw_version_info_valid; + struct qmi_wlanfw_fw_version_info_s_v01 fw_version_info; + u8 fw_build_id_valid; + char fw_build_id[ATH12K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1]; + u8 num_macs_valid; + u8 num_macs; + u8 voltage_mv_valid; + u32 voltage_mv; + u8 time_freq_hz_valid; + u32 time_freq_hz; + u8 otp_version_valid; + u32 otp_version; + u8 eeprom_caldata_read_timeout_valid; + u32 eeprom_caldata_read_timeout; + u8 fw_caps_valid; + u64 fw_caps; + u8 rd_card_chain_cap_valid; + enum qmi_wlanfw_rd_card_chain_cap_v01 rd_card_chain_cap; + u8 dev_mem_info_valid; + struct qmi_wlanfw_dev_mem_info_s_v01 dev_mem[ATH12K_QMI_WLFW_MAX_DEV_MEM_NUM_V01]; +}; + +struct qmi_wlanfw_cap_req_msg_v01 { + char placeholder; +}; + +#define QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN 6182 +#define QMI_WLANFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_LEN 7 +#define QMI_WLANFW_BDF_DOWNLOAD_RESP_V01 0x0025 +#define QMI_WLANFW_BDF_DOWNLOAD_REQ_V01 0x0025 +/* TODO: Need to check with MCL and FW team that data can be pointer and + * can be last element in structure + */ +struct qmi_wlanfw_bdf_download_req_msg_v01 { + u8 valid; + u8 file_id_valid; + enum qmi_wlanfw_cal_temp_id_enum_v01 file_id; + u8 total_size_valid; + u32 total_size; + u8 seg_id_valid; + u32 seg_id; + u8 data_valid; + u32 data_len; + u8 data[QMI_WLANFW_MAX_DATA_SIZE_V01]; + u8 end_valid; + u8 end; + u8 bdf_type_valid; + u8 bdf_type; + +}; + +struct qmi_wlanfw_bdf_download_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN 18 +#define QMI_WLANFW_M3_INFO_RESP_MSG_V01_MAX_MSG_LEN 7 +#define QMI_WLANFW_M3_INFO_RESP_V01 0x003C +#define QMI_WLANFW_M3_INFO_REQ_V01 0x003C + +struct qmi_wlanfw_m3_info_req_msg_v01 { + u64 addr; + 
u32 size; +}; + +struct qmi_wlanfw_m3_info_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN 11 +#define QMI_WLANFW_WLAN_MODE_RESP_MSG_V01_MAX_LEN 7 +#define QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN 803 +#define QMI_WLANFW_WLAN_CFG_RESP_MSG_V01_MAX_LEN 7 +#define QMI_WLANFW_WLAN_MODE_REQ_V01 0x0022 +#define QMI_WLANFW_WLAN_MODE_RESP_V01 0x0022 +#define QMI_WLANFW_WLAN_CFG_REQ_V01 0x0023 +#define QMI_WLANFW_WLAN_CFG_RESP_V01 0x0023 +#define QMI_WLANFW_MAX_STR_LEN_V01 16 +#define QMI_WLANFW_MAX_NUM_CE_V01 12 +#define QMI_WLANFW_MAX_NUM_SVC_V01 24 +#define QMI_WLANFW_MAX_NUM_SHADOW_REG_V01 24 +#define QMI_WLANFW_MAX_NUM_SHADOW_REG_V3_V01 60 + +struct qmi_wlanfw_wlan_mode_req_msg_v01 { + u32 mode; + u8 hw_debug_valid; + u8 hw_debug; +}; + +struct qmi_wlanfw_wlan_mode_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +struct qmi_wlanfw_wlan_cfg_req_msg_v01 { + u8 host_version_valid; + char host_version[QMI_WLANFW_MAX_STR_LEN_V01 + 1]; + u8 tgt_cfg_valid; + u32 tgt_cfg_len; + struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01 + tgt_cfg[QMI_WLANFW_MAX_NUM_CE_V01]; + u8 svc_cfg_valid; + u32 svc_cfg_len; + struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01 + svc_cfg[QMI_WLANFW_MAX_NUM_SVC_V01]; + u8 shadow_reg_valid; + u32 shadow_reg_len; + struct qmi_wlanfw_shadow_reg_cfg_s_v01 + shadow_reg[QMI_WLANFW_MAX_NUM_SHADOW_REG_V01]; + u8 shadow_reg_v3_valid; + u32 shadow_reg_v3_len; + struct qmi_wlanfw_shadow_reg_v3_cfg_s_v01 + shadow_reg_v3[QMI_WLANFW_MAX_NUM_SHADOW_REG_V3_V01]; +}; + +struct qmi_wlanfw_wlan_cfg_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +int ath12k_qmi_firmware_start(struct ath12k_base *ab, + u32 mode); +void ath12k_qmi_firmware_stop(struct ath12k_base *ab); +void ath12k_qmi_event_work(struct work_struct *work); +void ath12k_qmi_msg_recv_work(struct work_struct *work); +void ath12k_qmi_deinit_service(struct ath12k_base *ab); +int ath12k_qmi_init_service(struct ath12k_base *ab); + +#endif diff --git a/drivers/net/wireless/ath/ath12k/reg.c b/drivers/net/wireless/ath/ath12k/reg.c new file mode 100644 index 000000000000..6ede91ebc8e1 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/reg.c @@ -0,0 +1,732 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ +#include <linux/rtnetlink.h> +#include "core.h" +#include "debug.h" + +/* World regdom to be used in case default regd from fw is unavailable */ +#define ATH12K_2GHZ_CH01_11 REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0) +#define ATH12K_5GHZ_5150_5350 REG_RULE(5150 - 10, 5350 + 10, 80, 0, 30,\ + NL80211_RRF_NO_IR) +#define ATH12K_5GHZ_5725_5850 REG_RULE(5725 - 10, 5850 + 10, 80, 0, 30,\ + NL80211_RRF_NO_IR) + +#define ETSI_WEATHER_RADAR_BAND_LOW 5590 +#define ETSI_WEATHER_RADAR_BAND_HIGH 5650 +#define ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT 600000 + +static const struct ieee80211_regdomain ath12k_world_regd = { + .n_reg_rules = 3, + .alpha2 = "00", + .reg_rules = { + ATH12K_2GHZ_CH01_11, + ATH12K_5GHZ_5150_5350, + ATH12K_5GHZ_5725_5850, + } +}; + +static bool ath12k_regdom_changes(struct ath12k *ar, char *alpha2) +{ + const struct ieee80211_regdomain *regd; + + regd = rcu_dereference_rtnl(ar->hw->wiphy->regd); + /* This can happen during wiphy registration where the previous + * user request is received before we update the regd received + * from firmware. 
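+ * Treating a missing regd as "changed" lets that first user + * hint through instead of silently dropping it.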
+ */ + if (!regd) + return true; + + return memcmp(regd->alpha2, alpha2, 2) != 0; +} + +static void +ath12k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct ath12k_wmi_init_country_arg arg; + struct ath12k *ar = hw->priv; + int ret; + + ath12k_dbg(ar->ab, ATH12K_DBG_REG, + "Regulatory Notification received for %s\n", wiphy_name(wiphy)); + + /* Currently supporting only General User Hints. Cell base user + * hints to be handled later. + * Hints from other sources like Core, Beacons are not expected for + * self managed wiphy's + */ + if (!(request->initiator == NL80211_REGDOM_SET_BY_USER && + request->user_reg_hint_type == NL80211_USER_REG_HINT_USER)) { + ath12k_warn(ar->ab, "Unexpected Regulatory event for this wiphy\n"); + return; + } + + if (!IS_ENABLED(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS)) { + ath12k_dbg(ar->ab, ATH12K_DBG_REG, + "Country Setting is not allowed\n"); + return; + } + + if (!ath12k_regdom_changes(ar, request->alpha2)) { + ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Country is already set\n"); + return; + } + + /* Set the country code to the firmware and wait for + * the WMI_REG_CHAN_LIST_CC EVENT for updating the + * reg info + */ + arg.flags = ALPHA_IS_SET; + memcpy(&arg.cc_info.alpha2, request->alpha2, 2); + arg.cc_info.alpha2[2] = 0; + + ret = ath12k_wmi_send_init_country_cmd(ar, &arg); + if (ret) + ath12k_warn(ar->ab, + "INIT Country code set to fw failed : %d\n", ret); +} + +int ath12k_reg_update_chan_list(struct ath12k *ar) +{ + struct ieee80211_supported_band **bands; + struct ath12k_wmi_scan_chan_list_arg *arg; + struct ieee80211_channel *channel; + struct ieee80211_hw *hw = ar->hw; + struct ath12k_wmi_channel_arg *ch; + enum nl80211_band band; + int num_channels = 0; + int i, ret; + + bands = hw->wiphy->bands; + for (band = 0; band < NUM_NL80211_BANDS; band++) { + if (!bands[band]) + continue; + + for (i = 0; i < bands[band]->n_channels; i++) { + if (bands[band]->channels[i].flags & + IEEE80211_CHAN_DISABLED) + continue; + + num_channels++; + } + } + + if (WARN_ON(!num_channels)) + return -EINVAL; + + arg = kzalloc(struct_size(arg, channel, num_channels), GFP_KERNEL); + + if (!arg) + return -ENOMEM; + + arg->pdev_id = ar->pdev->pdev_id; + arg->nallchans = num_channels; + + ch = arg->channel; + + for (band = 0; band < NUM_NL80211_BANDS; band++) { + if (!bands[band]) + continue; + + for (i = 0; i < bands[band]->n_channels; i++) { + channel = &bands[band]->channels[i]; + + if (channel->flags & IEEE80211_CHAN_DISABLED) + continue; + + /* TODO: Set to true/false based on some condition? 
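+ * For now every enabled channel simply advertises HT/VHT/HE.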
*/ + ch->allow_ht = true; + ch->allow_vht = true; + ch->allow_he = true; + + ch->dfs_set = + !!(channel->flags & IEEE80211_CHAN_RADAR); + ch->is_chan_passive = !!(channel->flags & + IEEE80211_CHAN_NO_IR); + ch->is_chan_passive |= ch->dfs_set; + ch->mhz = channel->center_freq; + ch->cfreq1 = channel->center_freq; + ch->minpower = 0; + ch->maxpower = channel->max_power * 2; + ch->maxregpower = channel->max_reg_power * 2; + ch->antennamax = channel->max_antenna_gain * 2; + + /* TODO: Use appropriate phymodes */ + if (channel->band == NL80211_BAND_2GHZ) + ch->phy_mode = MODE_11G; + else + ch->phy_mode = MODE_11A; + + if (channel->band == NL80211_BAND_6GHZ && + cfg80211_channel_is_psc(channel)) + ch->psc_channel = true; + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "mac channel [%d/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n", + i, arg->nallchans, + ch->mhz, ch->maxpower, ch->maxregpower, + ch->antennamax, ch->phy_mode); + + ch++; + /* TODO: use quarter/half rate, cfreq12, dfs_cfreq2 + * set_agile, reg_class_idx + */ + } + } + + ret = ath12k_wmi_send_scan_chan_list_cmd(ar, arg); + kfree(arg); + + return ret; +} + +static void ath12k_copy_regd(struct ieee80211_regdomain *regd_orig, + struct ieee80211_regdomain *regd_copy) +{ + u8 i; + + /* The caller should have checked error conditions */ + memcpy(regd_copy, regd_orig, sizeof(*regd_orig)); + + for (i = 0; i < regd_orig->n_reg_rules; i++) + memcpy(&regd_copy->reg_rules[i], &regd_orig->reg_rules[i], + sizeof(struct ieee80211_reg_rule)); +} + +int ath12k_regd_update(struct ath12k *ar, bool init) +{ + struct ieee80211_regdomain *regd, *regd_copy = NULL; + int ret, regd_len, pdev_id; + struct ath12k_base *ab; + + ab = ar->ab; + pdev_id = ar->pdev_idx; + + spin_lock_bh(&ab->base_lock); + + if (init) { + /* Apply the regd received during init through + * WMI_REG_CHAN_LIST_CC event. In case of failure to + * receive the regd, initialize with a default world + * regulatory.
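+ * (The fallback ath12k_world_regd above allows 2.4 GHz + * channels 1-11 and marks both 5 GHz ranges NO-IR.)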
+ */ + if (ab->default_regd[pdev_id]) { + regd = ab->default_regd[pdev_id]; + } else { + ath12k_warn(ab, + "failed to receive default regd during init\n"); + regd = (struct ieee80211_regdomain *)&ath12k_world_regd; + } + } else { + regd = ab->new_regd[pdev_id]; + } + + if (!regd) { + ret = -EINVAL; + spin_unlock_bh(&ab->base_lock); + goto err; + } + + regd_len = sizeof(*regd) + (regd->n_reg_rules * + sizeof(struct ieee80211_reg_rule)); + + regd_copy = kzalloc(regd_len, GFP_ATOMIC); + if (regd_copy) + ath12k_copy_regd(regd, regd_copy); + + spin_unlock_bh(&ab->base_lock); + + if (!regd_copy) { + ret = -ENOMEM; + goto err; + } + + rtnl_lock(); + wiphy_lock(ar->hw->wiphy); + ret = regulatory_set_wiphy_regd_sync(ar->hw->wiphy, regd_copy); + wiphy_unlock(ar->hw->wiphy); + rtnl_unlock(); + + kfree(regd_copy); + + if (ret) + goto err; + + if (ar->state == ATH12K_STATE_ON) { + ret = ath12k_reg_update_chan_list(ar); + if (ret) + goto err; + } + + return 0; +err: + ath12k_warn(ab, "failed to perform regd update : %d\n", ret); + return ret; +} + +static enum nl80211_dfs_regions +ath12k_map_fw_dfs_region(enum ath12k_dfs_region dfs_region) +{ + switch (dfs_region) { + case ATH12K_DFS_REG_FCC: + case ATH12K_DFS_REG_CN: + return NL80211_DFS_FCC; + case ATH12K_DFS_REG_ETSI: + case ATH12K_DFS_REG_KR: + return NL80211_DFS_ETSI; + case ATH12K_DFS_REG_MKK: + case ATH12K_DFS_REG_MKK_N: + return NL80211_DFS_JP; + default: + return NL80211_DFS_UNSET; + } +} + +static u32 ath12k_map_fw_reg_flags(u16 reg_flags) +{ + u32 flags = 0; + + if (reg_flags & REGULATORY_CHAN_NO_IR) + flags = NL80211_RRF_NO_IR; + + if (reg_flags & REGULATORY_CHAN_RADAR) + flags |= NL80211_RRF_DFS; + + if (reg_flags & REGULATORY_CHAN_NO_OFDM) + flags |= NL80211_RRF_NO_OFDM; + + if (reg_flags & REGULATORY_CHAN_INDOOR_ONLY) + flags |= NL80211_RRF_NO_OUTDOOR; + + if (reg_flags & REGULATORY_CHAN_NO_HT40) + flags |= NL80211_RRF_NO_HT40; + + if (reg_flags & REGULATORY_CHAN_NO_80MHZ) + flags |= NL80211_RRF_NO_80MHZ; + + if (reg_flags & REGULATORY_CHAN_NO_160MHZ) + flags |= NL80211_RRF_NO_160MHZ; + + return flags; +} + +static bool +ath12k_reg_can_intersect(struct ieee80211_reg_rule *rule1, + struct ieee80211_reg_rule *rule2) +{ + u32 start_freq1, end_freq1; + u32 start_freq2, end_freq2; + + start_freq1 = rule1->freq_range.start_freq_khz; + start_freq2 = rule2->freq_range.start_freq_khz; + + end_freq1 = rule1->freq_range.end_freq_khz; + end_freq2 = rule2->freq_range.end_freq_khz; + + if ((start_freq1 >= start_freq2 && + start_freq1 < end_freq2) || + (start_freq2 > start_freq1 && + start_freq2 < end_freq1)) + return true; + + /* TODO: Should we restrict intersection feasibility + * based on min bandwidth of the intersected region also, + * say the intersected rule should have a min bandwidth + * of 20MHz? 
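+ * As it stands, e.g. a 5170-5250 MHz rule and a 5230-5330 MHz rule + * are considered intersectable even though the overlap is only + * 5230-5250 MHz, i.e. 20 MHz wide.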
+ */ + + return false; +} + +static void ath12k_reg_intersect_rules(struct ieee80211_reg_rule *rule1, + struct ieee80211_reg_rule *rule2, + struct ieee80211_reg_rule *new_rule) +{ + u32 start_freq1, end_freq1; + u32 start_freq2, end_freq2; + u32 freq_diff, max_bw; + + start_freq1 = rule1->freq_range.start_freq_khz; + start_freq2 = rule2->freq_range.start_freq_khz; + + end_freq1 = rule1->freq_range.end_freq_khz; + end_freq2 = rule2->freq_range.end_freq_khz; + + new_rule->freq_range.start_freq_khz = max_t(u32, start_freq1, + start_freq2); + new_rule->freq_range.end_freq_khz = min_t(u32, end_freq1, end_freq2); + + freq_diff = new_rule->freq_range.end_freq_khz - + new_rule->freq_range.start_freq_khz; + max_bw = min_t(u32, rule1->freq_range.max_bandwidth_khz, + rule2->freq_range.max_bandwidth_khz); + new_rule->freq_range.max_bandwidth_khz = min_t(u32, max_bw, freq_diff); + + new_rule->power_rule.max_antenna_gain = + min_t(u32, rule1->power_rule.max_antenna_gain, + rule2->power_rule.max_antenna_gain); + + new_rule->power_rule.max_eirp = min_t(u32, rule1->power_rule.max_eirp, + rule2->power_rule.max_eirp); + + /* Use the flags of both the rules */ + new_rule->flags = rule1->flags | rule2->flags; + + /* To be safe, let's use the max cac timeout of both rules */ + new_rule->dfs_cac_ms = max_t(u32, rule1->dfs_cac_ms, + rule2->dfs_cac_ms); +} + +static struct ieee80211_regdomain * +ath12k_regd_intersect(struct ieee80211_regdomain *default_regd, + struct ieee80211_regdomain *curr_regd) +{ + u8 num_old_regd_rules, num_curr_regd_rules, num_new_regd_rules; + struct ieee80211_reg_rule *old_rule, *curr_rule, *new_rule; + struct ieee80211_regdomain *new_regd = NULL; + u8 i, j, k; + + num_old_regd_rules = default_regd->n_reg_rules; + num_curr_regd_rules = curr_regd->n_reg_rules; + num_new_regd_rules = 0; + + /* Find the number of intersecting rules to allocate new regd memory */ + for (i = 0; i < num_old_regd_rules; i++) { + old_rule = default_regd->reg_rules + i; + for (j = 0; j < num_curr_regd_rules; j++) { + curr_rule = curr_regd->reg_rules + j; + + if (ath12k_reg_can_intersect(old_rule, curr_rule)) + num_new_regd_rules++; + } + } + + if (!num_new_regd_rules) + return NULL; + + new_regd = kzalloc(sizeof(*new_regd) + (num_new_regd_rules * + sizeof(struct ieee80211_reg_rule)), + GFP_ATOMIC); + + if (!new_regd) + return NULL; + + /* We set the new country and dfs region directly and only trim + * the freq, power, antenna gain by intersecting with the + * default regdomain. Also MAX of the dfs cac timeout is selected.
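+ * The two passes over the rule lists are O(n * m): one to count + * the matches for the allocation above, one to fill new_regd.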
+ */ + new_regd->n_reg_rules = num_new_regd_rules; + memcpy(new_regd->alpha2, curr_regd->alpha2, sizeof(new_regd->alpha2)); + new_regd->dfs_region = curr_regd->dfs_region; + new_rule = new_regd->reg_rules; + + for (i = 0, k = 0; i < num_old_regd_rules; i++) { + old_rule = default_regd->reg_rules + i; + for (j = 0; j < num_curr_regd_rules; j++) { + curr_rule = curr_regd->reg_rules + j; + + if (ath12k_reg_can_intersect(old_rule, curr_rule)) + ath12k_reg_intersect_rules(old_rule, curr_rule, + (new_rule + k++)); + } + } + return new_regd; +} + +static const char * +ath12k_reg_get_regdom_str(enum nl80211_dfs_regions dfs_region) +{ + switch (dfs_region) { + case NL80211_DFS_FCC: + return "FCC"; + case NL80211_DFS_ETSI: + return "ETSI"; + case NL80211_DFS_JP: + return "JP"; + default: + return "UNSET"; + } +} + +static u16 +ath12k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw) +{ + u16 bw; + + bw = end_freq - start_freq; + bw = min_t(u16, bw, max_bw); + + if (bw >= 80 && bw < 160) + bw = 80; + else if (bw >= 40 && bw < 80) + bw = 40; + else if (bw < 40) + bw = 20; + + return bw; +} + +static void +ath12k_reg_update_rule(struct ieee80211_reg_rule *reg_rule, u32 start_freq, + u32 end_freq, u32 bw, u32 ant_gain, u32 reg_pwr, + u32 reg_flags) +{ + reg_rule->freq_range.start_freq_khz = MHZ_TO_KHZ(start_freq); + reg_rule->freq_range.end_freq_khz = MHZ_TO_KHZ(end_freq); + reg_rule->freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw); + reg_rule->power_rule.max_antenna_gain = DBI_TO_MBI(ant_gain); + reg_rule->power_rule.max_eirp = DBM_TO_MBM(reg_pwr); + reg_rule->flags = reg_flags; +} + +static void +ath12k_reg_update_weather_radar_band(struct ath12k_base *ab, + struct ieee80211_regdomain *regd, + struct ath12k_reg_rule *reg_rule, + u8 *rule_idx, u32 flags, u16 max_bw) +{ + u32 end_freq; + u16 bw; + u8 i; + + i = *rule_idx; + + bw = ath12k_reg_adjust_bw(reg_rule->start_freq, + ETSI_WEATHER_RADAR_BAND_LOW, max_bw); + + ath12k_reg_update_rule(regd->reg_rules + i, reg_rule->start_freq, + ETSI_WEATHER_RADAR_BAND_LOW, bw, + reg_rule->ant_gain, reg_rule->reg_power, + flags); + + ath12k_dbg(ab, ATH12K_DBG_REG, + "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", + i + 1, reg_rule->start_freq, ETSI_WEATHER_RADAR_BAND_LOW, + bw, reg_rule->ant_gain, reg_rule->reg_power, + regd->reg_rules[i].dfs_cac_ms, + flags); + + if (reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_HIGH) + end_freq = ETSI_WEATHER_RADAR_BAND_HIGH; + else + end_freq = reg_rule->end_freq; + + bw = ath12k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_LOW, end_freq, + max_bw); + + i++; + + ath12k_reg_update_rule(regd->reg_rules + i, + ETSI_WEATHER_RADAR_BAND_LOW, end_freq, bw, + reg_rule->ant_gain, reg_rule->reg_power, + flags); + + regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT; + + ath12k_dbg(ab, ATH12K_DBG_REG, + "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", + i + 1, ETSI_WEATHER_RADAR_BAND_LOW, end_freq, + bw, reg_rule->ant_gain, reg_rule->reg_power, + regd->reg_rules[i].dfs_cac_ms, + flags); + + if (end_freq == reg_rule->end_freq) { + regd->n_reg_rules--; + *rule_idx = i; + return; + } + + bw = ath12k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_HIGH, + reg_rule->end_freq, max_bw); + + i++; + + ath12k_reg_update_rule(regd->reg_rules + i, ETSI_WEATHER_RADAR_BAND_HIGH, + reg_rule->end_freq, bw, + reg_rule->ant_gain, reg_rule->reg_power, + flags); + + ath12k_dbg(ab, ATH12K_DBG_REG, + "\t%d. 
(%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", + i + 1, ETSI_WEATHER_RADAR_BAND_HIGH, reg_rule->end_freq, + bw, reg_rule->ant_gain, reg_rule->reg_power, + regd->reg_rules[i].dfs_cac_ms, + flags); + + *rule_idx = i; +} + +struct ieee80211_regdomain * +ath12k_reg_build_regd(struct ath12k_base *ab, + struct ath12k_reg_info *reg_info, bool intersect) +{ + struct ieee80211_regdomain *tmp_regd, *default_regd, *new_regd = NULL; + struct ath12k_reg_rule *reg_rule; + u8 i = 0, j = 0, k = 0; + u8 num_rules; + u16 max_bw; + u32 flags; + char alpha2[3]; + + num_rules = reg_info->num_5g_reg_rules + reg_info->num_2g_reg_rules; + + /* FIXME: Currently taking reg rules for 6G only from Indoor AP mode list. + * This can be updated to choose the combination dynamically based on AP + * type and client type, after complete 6G regulatory support is added. + */ + if (reg_info->is_ext_reg_event) + num_rules += reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP]; + + if (!num_rules) + goto ret; + + /* Add max additional rules to accommodate weather radar band */ + if (reg_info->dfs_region == ATH12K_DFS_REG_ETSI) + num_rules += 2; + + tmp_regd = kzalloc(sizeof(*tmp_regd) + + (num_rules * sizeof(struct ieee80211_reg_rule)), + GFP_ATOMIC); + if (!tmp_regd) + goto ret; + + memcpy(tmp_regd->alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1); + memcpy(alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1); + alpha2[2] = '\0'; + tmp_regd->dfs_region = ath12k_map_fw_dfs_region(reg_info->dfs_region); + + ath12k_dbg(ab, ATH12K_DBG_REG, + "\r\nCountry %s, CFG Regdomain %s FW Regdomain %d, num_reg_rules %d\n", + alpha2, ath12k_reg_get_regdom_str(tmp_regd->dfs_region), + reg_info->dfs_region, num_rules); + /* Update reg_rules[] below. Firmware is expected to + * send these rules in order(2G rules first and then 5G) + */ + for (; i < num_rules; i++) { + if (reg_info->num_2g_reg_rules && + (i < reg_info->num_2g_reg_rules)) { + reg_rule = reg_info->reg_rules_2g_ptr + i; + max_bw = min_t(u16, reg_rule->max_bw, + reg_info->max_bw_2g); + flags = 0; + } else if (reg_info->num_5g_reg_rules && + (j < reg_info->num_5g_reg_rules)) { + reg_rule = reg_info->reg_rules_5g_ptr + j++; + max_bw = min_t(u16, reg_rule->max_bw, + reg_info->max_bw_5g); + + /* FW doesn't pass NL80211_RRF_AUTO_BW flag for + * BW Auto correction, we can enable this by default + * for all 5G rules here. The regulatory core performs + * BW correction if required and applies flags as + * per other BW rule flags we pass from here + */ + flags = NL80211_RRF_AUTO_BW; + } else if (reg_info->is_ext_reg_event && + reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] && + (k < reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP])) { + reg_rule = reg_info->reg_rules_6g_ap_ptr[WMI_REG_INDOOR_AP] + k++; + max_bw = min_t(u16, reg_rule->max_bw, + reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP]); + flags = NL80211_RRF_AUTO_BW; + } else { + break; + } + + flags |= ath12k_map_fw_reg_flags(reg_rule->flags); + + ath12k_reg_update_rule(tmp_regd->reg_rules + i, + reg_rule->start_freq, + reg_rule->end_freq, max_bw, + reg_rule->ant_gain, reg_rule->reg_power, + flags); + + /* Update dfs cac timeout if the dfs domain is ETSI and the + * new rule covers weather radar band. + * Default value of '0' corresponds to 60s timeout, so no + * need to update that for other rules. 
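+ * (Here the ETSI weather radar band is 5590-5650 MHz and the + * longer CAC is 600 s, per the ETSI_WEATHER_RADAR_BAND defines + * above.)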
+ */ + if (flags & NL80211_RRF_DFS && + reg_info->dfs_region == ATH12K_DFS_REG_ETSI && + (reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_LOW && + reg_rule->start_freq < ETSI_WEATHER_RADAR_BAND_HIGH)){ + ath12k_reg_update_weather_radar_band(ab, tmp_regd, + reg_rule, &i, + flags, max_bw); + continue; + } + + if (reg_info->is_ext_reg_event) { + ath12k_dbg(ab, ATH12K_DBG_REG, "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d) (%d, %d)\n", + i + 1, reg_rule->start_freq, reg_rule->end_freq, + max_bw, reg_rule->ant_gain, reg_rule->reg_power, + tmp_regd->reg_rules[i].dfs_cac_ms, + flags, reg_rule->psd_flag, reg_rule->psd_eirp); + } else { + ath12k_dbg(ab, ATH12K_DBG_REG, + "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", + i + 1, reg_rule->start_freq, reg_rule->end_freq, + max_bw, reg_rule->ant_gain, reg_rule->reg_power, + tmp_regd->reg_rules[i].dfs_cac_ms, + flags); + } + } + + tmp_regd->n_reg_rules = i; + + if (intersect) { + default_regd = ab->default_regd[reg_info->phy_id]; + + /* Get a new regd by intersecting the received regd with + * our default regd. + */ + new_regd = ath12k_regd_intersect(default_regd, tmp_regd); + kfree(tmp_regd); + if (!new_regd) { + ath12k_warn(ab, "Unable to create intersected regdomain\n"); + goto ret; + } + } else { + new_regd = tmp_regd; + } + +ret: + return new_regd; +} + +void ath12k_regd_update_work(struct work_struct *work) +{ + struct ath12k *ar = container_of(work, struct ath12k, + regd_update_work); + int ret; + + ret = ath12k_regd_update(ar, false); + if (ret) { + /* Firmware has already moved to the new regd. We need + * to maintain channel consistency across FW, Host driver + * and userspace. Hence as a fallback mechanism we can set + * the prev or default country code to the firmware. + */ + /* TODO: Implement Fallback Mechanism */ + } +} + +void ath12k_reg_init(struct ath12k *ar) +{ + ar->hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED; + ar->hw->wiphy->reg_notifier = ath12k_reg_notifier; +} + +void ath12k_reg_free(struct ath12k_base *ab) +{ + int i; + + for (i = 0; i < ab->hw_params->max_radios; i++) { + kfree(ab->default_regd[i]); + kfree(ab->new_regd[i]); + } +} diff --git a/drivers/net/wireless/ath/ath12k/reg.h b/drivers/net/wireless/ath/ath12k/reg.h new file mode 100644 index 000000000000..56d009a47234 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/reg.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef ATH12K_REG_H +#define ATH12K_REG_H + +#include <linux/kernel.h> +#include <net/regulatory.h> + +struct ath12k_base; +struct ath12k; + +/* DFS regdomains supported by Firmware */ +enum ath12k_dfs_region { + ATH12K_DFS_REG_UNSET, + ATH12K_DFS_REG_FCC, + ATH12K_DFS_REG_ETSI, + ATH12K_DFS_REG_MKK, + ATH12K_DFS_REG_CN, + ATH12K_DFS_REG_KR, + ATH12K_DFS_REG_MKK_N, + ATH12K_DFS_REG_UNDEF, +}; + +enum ath12k_reg_cc_code { + REG_SET_CC_STATUS_PASS = 0, + REG_CURRENT_ALPHA2_NOT_FOUND = 1, + REG_INIT_ALPHA2_NOT_FOUND = 2, + REG_SET_CC_CHANGE_NOT_ALLOWED = 3, + REG_SET_CC_STATUS_NO_MEMORY = 4, + REG_SET_CC_STATUS_FAIL = 5, +}; + +struct ath12k_reg_rule { + u16 start_freq; + u16 end_freq; + u16 max_bw; + u8 reg_power; + u8 ant_gain; + u16 flags; + bool psd_flag; + u16 psd_eirp; +}; + +struct ath12k_reg_info { + enum ath12k_reg_cc_code status_code; + u8 num_phy; + u8 phy_id; + u16 reg_dmn_pair; + u16 ctry_code; + u8 alpha2[REG_ALPHA2_LEN + 1]; + u32 dfs_region; + u32 phybitmap; + bool is_ext_reg_event; + u32 min_bw_2g; + u32 max_bw_2g; + u32 min_bw_5g; + u32 max_bw_5g; + u32 num_2g_reg_rules; + u32 num_5g_reg_rules; + struct ath12k_reg_rule *reg_rules_2g_ptr; + struct ath12k_reg_rule *reg_rules_5g_ptr; + enum wmi_reg_6g_client_type client_type; + bool rnr_tpe_usable; + bool unspecified_ap_usable; + /* TODO: All 6G related info can be stored only for required + * combination instead of all types, to optimize memory usage. + */ + u8 domain_code_6g_ap[WMI_REG_CURRENT_MAX_AP_TYPE]; + u8 domain_code_6g_client[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE]; + u32 domain_code_6g_super_id; + u32 min_bw_6g_ap[WMI_REG_CURRENT_MAX_AP_TYPE]; + u32 max_bw_6g_ap[WMI_REG_CURRENT_MAX_AP_TYPE]; + u32 min_bw_6g_client[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE]; + u32 max_bw_6g_client[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE]; + u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE]; + u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE]; + struct ath12k_reg_rule *reg_rules_6g_ap_ptr[WMI_REG_CURRENT_MAX_AP_TYPE]; + struct ath12k_reg_rule *reg_rules_6g_client_ptr + [WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE]; +}; + +void ath12k_reg_init(struct ath12k *ar); +void ath12k_reg_free(struct ath12k_base *ab); +void ath12k_regd_update_work(struct work_struct *work); +struct ieee80211_regdomain *ath12k_reg_build_regd(struct ath12k_base *ab, + struct ath12k_reg_info *reg_info, + bool intersect); +int ath12k_regd_update(struct ath12k *ar, bool init); +int ath12k_reg_update_chan_list(struct ath12k *ar); + +#endif diff --git a/drivers/net/wireless/ath/ath12k/rx_desc.h b/drivers/net/wireless/ath/ath12k/rx_desc.h new file mode 100644 index 000000000000..5feaff6450ad --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/rx_desc.h @@ -0,0 +1,1441 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ +#ifndef ATH12K_RX_DESC_H +#define ATH12K_RX_DESC_H + +enum rx_desc_decap_type { + RX_DESC_DECAP_TYPE_RAW, + RX_DESC_DECAP_TYPE_NATIVE_WIFI, + RX_DESC_DECAP_TYPE_ETHERNET2_DIX, + RX_DESC_DECAP_TYPE_8023, +}; + +enum rx_desc_decrypt_status_code { + RX_DESC_DECRYPT_STATUS_CODE_OK, + RX_DESC_DECRYPT_STATUS_CODE_UNPROTECTED_FRAME, + RX_DESC_DECRYPT_STATUS_CODE_DATA_ERR, + RX_DESC_DECRYPT_STATUS_CODE_KEY_INVALID, + RX_DESC_DECRYPT_STATUS_CODE_PEER_ENTRY_INVALID, + RX_DESC_DECRYPT_STATUS_CODE_OTHER, +}; + +#define RX_MPDU_START_INFO0_REO_DEST_IND GENMASK(4, 0) +#define RX_MPDU_START_INFO0_LMAC_PEER_ID_MSB GENMASK(6, 5) +#define RX_MPDU_START_INFO0_FLOW_ID_TOEPLITZ BIT(7) +#define RX_MPDU_START_INFO0_PKT_SEL_FP_UCAST_DATA BIT(8) +#define RX_MPDU_START_INFO0_PKT_SEL_FP_MCAST_DATA BIT(9) +#define RX_MPDU_START_INFO0_PKT_SEL_FP_CTRL_BAR BIT(10) +#define RX_MPDU_START_INFO0_RXDMA0_SRC_RING_SEL GENMASK(13, 11) +#define RX_MPDU_START_INFO0_RXDMA0_DST_RING_SEL GENMASK(16, 14) +#define RX_MPDU_START_INFO0_MCAST_ECHO_DROP_EN BIT(17) +#define RX_MPDU_START_INFO0_WDS_LEARN_DETECT_EN BIT(18) +#define RX_MPDU_START_INFO0_INTRA_BSS_CHECK_EN BIT(19) +#define RX_MPDU_START_INFO0_USE_PPE BIT(20) +#define RX_MPDU_START_INFO0_PPE_ROUTING_EN BIT(21) + +#define RX_MPDU_START_INFO1_REO_QUEUE_DESC_HI GENMASK(7, 0) +#define RX_MPDU_START_INFO1_RECV_QUEUE_NUM GENMASK(23, 8) +#define RX_MPDU_START_INFO1_PRE_DELIM_ERR_WARN BIT(24) +#define RX_MPDU_START_INFO1_FIRST_DELIM_ERR BIT(25) + +#define RX_MPDU_START_INFO2_EPD_EN BIT(0) +#define RX_MPDU_START_INFO2_ALL_FRAME_ENCPD BIT(1) +#define RX_MPDU_START_INFO2_ENC_TYPE GENMASK(5, 2) +#define RX_MPDU_START_INFO2_VAR_WEP_KEY_WIDTH GENMASK(7, 6) +#define RX_MPDU_START_INFO2_MESH_STA GENMASK(9, 8) +#define RX_MPDU_START_INFO2_BSSID_HIT BIT(10) +#define RX_MPDU_START_INFO2_BSSID_NUM GENMASK(14, 11) +#define RX_MPDU_START_INFO2_TID GENMASK(18, 15) + +#define RX_MPDU_START_INFO3_RXPCU_MPDU_FLTR GENMASK(1, 0) +#define RX_MPDU_START_INFO3_SW_FRAME_GRP_ID GENMASK(8, 2) +#define RX_MPDU_START_INFO3_NDP_FRAME BIT(9) +#define RX_MPDU_START_INFO3_PHY_ERR BIT(10) +#define RX_MPDU_START_INFO3_PHY_ERR_MPDU_HDR BIT(11) +#define RX_MPDU_START_INFO3_PROTO_VER_ERR BIT(12) +#define RX_MPDU_START_INFO3_AST_LOOKUP_VALID BIT(13) +#define RX_MPDU_START_INFO3_RANGING BIT(14) + +#define RX_MPDU_START_INFO4_MPDU_FCTRL_VALID BIT(0) +#define RX_MPDU_START_INFO4_MPDU_DUR_VALID BIT(1) +#define RX_MPDU_START_INFO4_MAC_ADDR1_VALID BIT(2) +#define RX_MPDU_START_INFO4_MAC_ADDR2_VALID BIT(3) +#define RX_MPDU_START_INFO4_MAC_ADDR3_VALID BIT(4) +#define RX_MPDU_START_INFO4_MAC_ADDR4_VALID BIT(5) +#define RX_MPDU_START_INFO4_MPDU_SEQ_CTRL_VALID BIT(6) +#define RX_MPDU_START_INFO4_MPDU_QOS_CTRL_VALID BIT(7) +#define RX_MPDU_START_INFO4_MPDU_HT_CTRL_VALID BIT(8) +#define RX_MPDU_START_INFO4_ENCRYPT_INFO_VALID BIT(9) +#define RX_MPDU_START_INFO4_MPDU_FRAG_NUMBER GENMASK(13, 10) +#define RX_MPDU_START_INFO4_MORE_FRAG_FLAG BIT(14) +#define RX_MPDU_START_INFO4_FROM_DS BIT(16) +#define RX_MPDU_START_INFO4_TO_DS BIT(17) +#define RX_MPDU_START_INFO4_ENCRYPTED BIT(18) +#define RX_MPDU_START_INFO4_MPDU_RETRY BIT(19) +#define RX_MPDU_START_INFO4_MPDU_SEQ_NUM GENMASK(31, 20) + +#define RX_MPDU_START_INFO5_KEY_ID GENMASK(7, 0) +#define RX_MPDU_START_INFO5_NEW_PEER_ENTRY BIT(8) +#define RX_MPDU_START_INFO5_DECRYPT_NEEDED BIT(9) +#define RX_MPDU_START_INFO5_DECAP_TYPE GENMASK(11, 10) +#define RX_MPDU_START_INFO5_VLAN_TAG_C_PADDING BIT(12) +#define RX_MPDU_START_INFO5_VLAN_TAG_S_PADDING BIT(13) +#define 
RX_MPDU_START_INFO5_STRIP_VLAN_TAG_C BIT(14) +#define RX_MPDU_START_INFO5_STRIP_VLAN_TAG_S BIT(15) +#define RX_MPDU_START_INFO5_PRE_DELIM_COUNT GENMASK(27, 16) +#define RX_MPDU_START_INFO5_AMPDU_FLAG BIT(28) +#define RX_MPDU_START_INFO5_BAR_FRAME BIT(29) +#define RX_MPDU_START_INFO5_RAW_MPDU BIT(30) + +#define RX_MPDU_START_INFO6_MPDU_LEN GENMASK(13, 0) +#define RX_MPDU_START_INFO6_FIRST_MPDU BIT(14) +#define RX_MPDU_START_INFO6_MCAST_BCAST BIT(15) +#define RX_MPDU_START_INFO6_AST_IDX_NOT_FOUND BIT(16) +#define RX_MPDU_START_INFO6_AST_IDX_TIMEOUT BIT(17) +#define RX_MPDU_START_INFO6_POWER_MGMT BIT(18) +#define RX_MPDU_START_INFO6_NON_QOS BIT(19) +#define RX_MPDU_START_INFO6_NULL_DATA BIT(20) +#define RX_MPDU_START_INFO6_MGMT_TYPE BIT(21) +#define RX_MPDU_START_INFO6_CTRL_TYPE BIT(22) +#define RX_MPDU_START_INFO6_MORE_DATA BIT(23) +#define RX_MPDU_START_INFO6_EOSP BIT(24) +#define RX_MPDU_START_INFO6_FRAGMENT BIT(25) +#define RX_MPDU_START_INFO6_ORDER BIT(26) +#define RX_MPDU_START_INFO6_UAPSD_TRIGGER BIT(27) +#define RX_MPDU_START_INFO6_ENCRYPT_REQUIRED BIT(28) +#define RX_MPDU_START_INFO6_DIRECTED BIT(29) +#define RX_MPDU_START_INFO6_AMSDU_PRESENT BIT(30) + +#define RX_MPDU_START_INFO7_VDEV_ID GENMASK(7, 0) +#define RX_MPDU_START_INFO7_SERVICE_CODE GENMASK(16, 8) +#define RX_MPDU_START_INFO7_PRIORITY_VALID BIT(17) +#define RX_MPDU_START_INFO7_SRC_INFO GENMASK(29, 18) + +#define RX_MPDU_START_INFO8_AUTH_TO_SEND_WDS BIT(0) + +struct rx_mpdu_start_qcn9274 { + __le32 info0; + __le32 reo_queue_desc_lo; + __le32 info1; + __le32 pn[4]; + __le32 info2; + __le32 peer_meta_data; + __le16 info3; + __le16 phy_ppdu_id; + __le16 ast_index; + __le16 sw_peer_id; + __le32 info4; + __le32 info5; + __le32 info6; + __le16 frame_ctrl; + __le16 duration; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; + u8 addr3[ETH_ALEN]; + __le16 seq_ctrl; + u8 addr4[ETH_ALEN]; + __le16 qos_ctrl; + __le32 ht_ctrl; + __le32 info7; + u8 multi_link_addr1[ETH_ALEN]; + u8 multi_link_addr2[ETH_ALEN]; + __le32 info8; + __le32 res0; + __le32 res1; +} __packed; + +/* rx_mpdu_start + * + * reo_destination_indication + * The id of the reo exit ring where the msdu frame shall be pushed + * after (MPDU level) reordering has finished. Values are defined + * in enum %HAL_RX_MSDU_DESC_REO_DEST_IND_. + * + * lmac_peer_id_msb + * + * If use_flow_id_toeplitz_clfy is set and lmac_peer_id_msb + * is 2'b00, Rx OLE uses a REO destination indication of {1'b1, + * hash[3:0]} using the chosen Toeplitz hash from Common Parser + * if flow search fails. + * If use_flow_id_toeplitz_clfy is set and lmac_peer_id_msb + * is not 2'b00, Rx OLE uses a REO destination indication of + * {lmac_peer_id_msb, hash[2:0]} using the chosen Toeplitz + * hash from Common Parser if flow search fails. + * + * use_flow_id_toeplitz_clfy + * Indication to Rx OLE to enable REO destination routing based + * on the chosen Toeplitz hash from Common Parser, in case + * flow search fails + * + * pkt_selection_fp_ucast_data + * Filter pass Unicast data frame (matching rxpcu_filter_pass + * and sw_frame_group_Unicast_data) routing selection + * + * pkt_selection_fp_mcast_data + * Filter pass Multicast data frame (matching rxpcu_filter_pass + * and sw_frame_group_Multicast_data) routing selection + * + * pkt_selection_fp_ctrl_bar + * Filter pass BAR frame (matching rxpcu_filter_pass + * and sw_frame_group_ctrl_1000) routing selection + * + * rxdma0_src_ring_selection + * Field only valid when, for the received frame type, the corresponding + * pkt_selection_fp_...
bit is set + * + * rxdma0_dst_ring_selection + * Field only valid when, for the received frame type, the corresponding + * pkt_selection_fp_... bit is set + * + * mcast_echo_drop_enable + * If set, for multicast packets, multicast echo check (i.e. + * SA search with mcast_echo_check = 1) shall be performed + * by RXOLE, and any multicast echo packets should be indicated + * to RXDMA for release to WBM + * + * wds_learning_detect_en + * If set, WDS learning detection based on SA search and notification + * to FW (using RXDMA0 status ring) is enabled and the "timestamp" + * field in address search failure cache-only entry should + * be used to avoid multiple WDS learning notifications. + * + * intrabss_check_en + * If set, intra-BSS routing detection is enabled + * + * use_ppe + * Indicates to RXDMA to ignore the REO_destination_indication + * and use a programmed value corresponding to the REO2PPE + * ring + * This override to REO2PPE for packets requiring multiple + * buffers shall be disabled based on an RXDMA configuration, + * as PPE may not support such packets. + * + * Supported only in full AP chips, not in client/soft + * chips + * + * ppe_routing_enable + * Global enable/disable bit for routing to PPE, used to disable + * PPE routing even if RXOLE CCE or flow search indicate 'Use_PPE' + * This is set by SW for peers which are being handled by a + * host SW/accelerator subsystem that also handles packet + * buffer management for WiFi-to-PPE routing. + * + * This is cleared by SW for peers which are being handled + * by a different subsystem, completely disabling WiFi-to-PPE + * routing for such peers. + * + * rx_reo_queue_desc_addr_lo + * Address (lower 32 bits) of the REO queue descriptor. + * + * rx_reo_queue_desc_addr_hi + * Address (upper 8 bits) of the REO queue descriptor. + * + * receive_queue_number + * Indicates the MPDU queue ID to which this MPDU link + * descriptor belongs. + * + * pre_delim_err_warning + * Indicates that a delimiter FCS error was found in between the + * previous MPDU and this MPDU. Note that this is just a warning, + * and does not mean that this MPDU is corrupted in any way. If + * it is, there will be other errors indicated such as FCS or + * decrypt errors. + * + * first_delim_err + * Indicates that the first delimiter had an FCS failure. + * + * pn + * The PN number. + * + * epd_en + * Field only valid when AST_based_lookup_valid == 1. + * In case of ndp or phy_err or AST_based_lookup_valid == 0, + * this field will be set to 0 + * If set to one use EPD instead of LPD + * In case of ndp or phy_err, this field will never be set. + * + * all_frames_shall_be_encrypted + * In case of ndp or phy_err or AST_based_lookup_valid == 0, + * this field will be set to 0 + * + * When set, all frames (data only ?) shall be encrypted. If + * not, RX CRYPTO shall set an error flag. + * + * + * encrypt_type + * In case of ndp or phy_err or AST_based_lookup_valid == 0, + * this field will be set to 0 + * + * Indicates type of decrypt cipher used (as defined in the + * peer entry) + * + * wep_key_width_for_variable_key + * + * Field only valid when key_type is set to wep_varied_width. + * + * mesh_sta + * + * bssid_hit + * When set, the BSSID of the incoming frame matched one of + * the 8 BSSID register values + * bssid_number + * Field only valid when bssid_hit is set.
+ * This number indicates which one out of the 8 BSSID register + * values matched the incoming frame + * + * tid + * Field only valid when mpdu_qos_control_valid is set + * The TID field in the QoS control field + * + * peer_meta_data + * Meta data that SW has programmed in the Peer table entry + * of the transmitting STA. + * + * rxpcu_mpdu_filter_in_category + * Field indicates what the reason was that this mpdu frame + * was allowed to come into the receive path by rxpcu. Values + * are defined in enum %RX_DESC_RXPCU_FILTER_*. + * + * sw_frame_group_id + * SW processes frames based on certain classifications. Values + * are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*. + * + * ndp_frame + * When set, the received frame was an NDP frame, and thus + * there will be no MPDU data. + * phy_err + * When set, a PHY error was received before MAC received any + * data, and thus there will be no MPDU data. + * + * phy_err_during_mpdu_header + * When set, a PHY error was received before MAC received the + * complete MPDU header which was needed for proper decoding + * + * protocol_version_err + * Set when RXPCU detected a version error in the Frame control + * field + * + * ast_based_lookup_valid + * When set, AST based lookup for this frame has found a valid + * result. + * + * ranging + * When set, a ranging NDPA or a ranging NDP was received. + * + * phy_ppdu_id + * A ppdu counter value that PHY increments for every PPDU + * received. The counter value wraps around. + * + * ast_index + * + * This field indicates the index of the AST entry corresponding + * to this MPDU. It is provided by the GSE module instantiated + * in RXPCU. + * A value of 0xFFFF indicates an invalid AST index, meaning + * that no AST entry was found or no AST search was performed. + * + * sw_peer_id + * In case of ndp or phy_err or AST_based_lookup_valid == 0, + * this field will be set to 0 + * This field indicates a unique peer identifier. It is set + * equal to field 'sw_peer_id' from the AST entry + * + * frame_control_valid + * When set, the field Mpdu_Frame_control_field has valid information + * + * frame_duration_valid + * When set, the field Mpdu_duration_field has valid information + * + * mac_addr_ad1..4_valid + * When set, the fields mac_addr_adx_..... have valid information + * + * mpdu_seq_ctrl_valid + * + * When set, the fields mpdu_sequence_control_field and mpdu_sequence_number + * have valid information. For MPDUs without a sequence control field, + * this field will not be set. + * + * mpdu_qos_ctrl_valid, mpdu_ht_ctrl_valid + * + * When set, the fields mpdu_qos_control_field and mpdu_ht_control have + * valid information. For MPDUs without a QoS or HT control field, this + * field will not be set. + * + * frame_encryption_info_valid + * + * When set, the encryption related info fields, like IV and + * PN are valid + * For MPDUs that are not encrypted, this will not be set. + * + * mpdu_fragment_number + * + * Field only valid when Mpdu_sequence_control_valid is set + * AND Fragment_flag is set. The fragment number from the 802.11 header + * + * more_fragment_flag + * + * The More Fragment bit setting from the MPDU header of the + * received frame + * + * fr_ds + * + * Field only valid when Mpdu_frame_control_valid is set + * Set if the from DS bit is set in the frame control. + * + * to_ds + * + * Field only valid when Mpdu_frame_control_valid is set + * Set if the to DS bit is set in the frame control. + * + * encrypted + * + * Field only valid when Mpdu_frame_control_valid is set.
+ * Protected bit from the frame control. + * + * mpdu_retry + * Field only valid when Mpdu_frame_control_valid is set. + * Retry bit from the frame control. Only valid when first_msdu is set + * + * mpdu_sequence_number + * Field only valid when Mpdu_sequence_control_valid is set. + * + * The sequence number from the 802.11 header. + * key_id + * The key ID octet from the IV. + * Field only valid when Frame_encryption_info_valid is set + * + * new_peer_entry + * Set if new RX_PEER_ENTRY TLV follows. If clear, RX_PEER_ENTRY + * doesn't follow, so the RX DECRYPTION module either uses the old + * peer entry or does not decrypt. + * + * decrypt_needed + * When RXPCU sets bit 'ast_index_not_found or ast_index_timeout', + * RXPCU will also ensure that this bit is NOT set. CRYPTO for that + * reason only needs to evaluate this bit and none of the other ones + * + * decap_type + * Used by the OLE during decapsulation. Values are defined in + * enum %MPDU_START_DECAP_TYPE_*. + * + * rx_insert_vlan_c_tag_padding + * rx_insert_vlan_s_tag_padding + * Insert 4 bytes of all zeros as VLAN tag or double VLAN tag if + * the rx payload does not have VLAN. + * + * strip_vlan_c_tag_decap + * strip_vlan_s_tag_decap + * Strip VLAN or double VLAN during decapsulation. + * + * pre_delim_count + * The number of delimiters before this MPDU. Note that this + * number is cleared at PPDU start. If this MPDU is the first + * received MPDU in the PPDU and this MPDU gets filtered-in, + * this field will indicate the number of delimiters located + * after the last MPDU in the previous PPDU. + * + * If this MPDU is located after the first received MPDU in + * a PPDU, this field will indicate the number of delimiters + * located between the previous MPDU and this MPDU. + * + * ampdu_flag + * Received frame was part of an A-MPDU. + * + * bar_frame + * Received frame is a BAR frame + * + * raw_mpdu + * Set when no 802.11 to nwifi/ethernet hdr conversion is done + * + * mpdu_length + * MPDU length before decapsulation. + * + * first_mpdu + * Indicates the first MSDU of the PPDU. If both first_mpdu + * and last_mpdu are set in the MSDU then this is not an + * A-MPDU frame but a stand-alone MPDU. Interior MPDU in an + * A-MPDU shall have both first_mpdu and last_mpdu bits set to + * 0. The PPDU start status will only be valid when this bit + * is set. + * + * mcast_bcast + * Multicast / broadcast indicator. Only set when the MAC + * address 1 bit 0 is set indicating mcast/bcast and the BSSID + * matches one of the 4 BSSID registers. Only set when + * first_msdu is set. + * + * ast_index_not_found + * Only valid when first_msdu is set. Indicates no AST matching + * entries within the max search count. + * + * ast_index_timeout + * Only valid when first_msdu is set. Indicates an unsuccessful + * search in the address search table due to timeout. + * + * power_mgmt + * Power management bit set in the 802.11 header. Only set + * when first_msdu is set. + * + * non_qos + * Set if packet is not a non-QoS data frame. Only set when + * first_msdu is set. + * + * null_data + * Set if frame type indicates either null data or QoS null + * data format. Only set when first_msdu is set. + * + * mgmt_type + * Set if packet is a management packet. Only set when + * first_msdu is set. + * + * ctrl_type + * Set if packet is a control packet. Only set when first_msdu + * is set. + * + * more_data + * Set if more bit in frame control is set. Only set when + * first_msdu is set.
+ * + * eosp + * Set if the EOSP (end of service period) bit in the QoS + * control field is set. Only set when first_msdu is set. + * + * + * fragment_flag + * Fragment indication + * + * order + * Set if the order bit in the frame control is set. Only + * set when first_msdu is set. + * + * u_apsd_trigger + * U-APSD trigger frame + * + * encrypt_required + * Indicates that this data type frame is not encrypted even if + * the policy for this MPDU requires encryption as indicated in + * the peer table key type. + * + * directed + * MPDU is a directed packet which means that the RA matched + * our STA addresses. In proxySTA it means that the TA matched + * an entry in our address search table with the corresponding + * 'no_ack' bit in the address search entry cleared. + * amsdu_present + * AMSDU present + * + * mpdu_frame_control_field + * Frame control field in header. Only valid when the field is marked valid. + * + * mpdu_duration_field + * Duration field in header. Only valid when the field is marked valid. + * + * mac_addr_adx + * MAC addresses in the received frame. Only valid when corresponding + * address valid bit is set + * + * mpdu_qos_control_field, mpdu_ht_control_field + * QoS/HT control fields from header. Valid only when corresponding fields + * are marked valid + * + * vdev_id + * Virtual device associated with this peer + * RXOLE uses this to determine intra-BSS routing. + * + * service_code + * Opaque service code between PPE and Wi-Fi + * This field gets passed on by REO to PPE in the EDMA descriptor + * ('REO_TO_PPE_RING'). + * + * priority_valid + * This field gets passed on by REO to PPE in the EDMA descriptor + * ('REO_TO_PPE_RING'). + * + * src_info + * Source (virtual) device/interface info. associated with + * this peer + * This field gets passed on by REO to PPE in the EDMA descriptor + * ('REO_TO_PPE_RING'). + * + * multi_link_addr_ad1_ad2_valid + * If set, Rx OLE shall convert Address1 and Address2 of received + * data frames to multi-link addresses during decapsulation to eth/nwifi + * + * multi_link_addr_ad1,ad2 + * Multi-link receiver address1,2. Only valid when corresponding + * valid bit is set + * + * authorize_to_send_wds + * If not set, RXDMA shall perform error-routing for WDS packets + * as the sender is not authorized and might misuse WDS frame + * format to inject packets with arbitrary DA/SA.
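+ * + * Illustrative note (not part of the hardware definition): host code + * is expected to read these descriptor words with the kernel bitfield + * helpers, for example + * + * u16 seq = le32_get_bits(desc->info4, + * RX_MPDU_START_INFO4_MPDU_SEQ_NUM); + * + * for a struct rx_mpdu_start_qcn9274 *desc.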
+ * + */ + +enum rx_msdu_start_pkt_type { + RX_MSDU_START_PKT_TYPE_11A, + RX_MSDU_START_PKT_TYPE_11B, + RX_MSDU_START_PKT_TYPE_11N, + RX_MSDU_START_PKT_TYPE_11AC, + RX_MSDU_START_PKT_TYPE_11AX, +}; + +enum rx_msdu_start_sgi { + RX_MSDU_START_SGI_0_8_US, + RX_MSDU_START_SGI_0_4_US, + RX_MSDU_START_SGI_1_6_US, + RX_MSDU_START_SGI_3_2_US, +}; + +enum rx_msdu_start_recv_bw { + RX_MSDU_START_RECV_BW_20MHZ, + RX_MSDU_START_RECV_BW_40MHZ, + RX_MSDU_START_RECV_BW_80MHZ, + RX_MSDU_START_RECV_BW_160MHZ, +}; + +enum rx_msdu_start_reception_type { + RX_MSDU_START_RECEPTION_TYPE_SU, + RX_MSDU_START_RECEPTION_TYPE_DL_MU_MIMO, + RX_MSDU_START_RECEPTION_TYPE_DL_MU_OFDMA, + RX_MSDU_START_RECEPTION_TYPE_DL_MU_OFDMA_MIMO, + RX_MSDU_START_RECEPTION_TYPE_UL_MU_MIMO, + RX_MSDU_START_RECEPTION_TYPE_UL_MU_OFDMA, + RX_MSDU_START_RECEPTION_TYPE_UL_MU_OFDMA_MIMO, +}; + +#define RX_MSDU_END_INFO0_RXPCU_MPDU_FITLER GENMASK(1, 0) +#define RX_MSDU_END_INFO0_SW_FRAME_GRP_ID GENMASK(8, 2) + +#define RX_MSDU_END_INFO1_REPORTED_MPDU_LENGTH GENMASK(13, 0) + +#define RX_MSDU_END_INFO2_CCE_SUPER_RULE GENMASK(13, 8) +#define RX_MSDU_END_INFO2_CCND_TRUNCATE BIT(14) +#define RX_MSDU_END_INFO2_CCND_CCE_DIS BIT(15) + +#define RX_MSDU_END_INFO3_DA_OFFSET GENMASK(5, 0) +#define RX_MSDU_END_INFO3_SA_OFFSET GENMASK(11, 6) +#define RX_MSDU_END_INFO3_DA_OFFSET_VALID BIT(12) +#define RX_MSDU_END_INFO3_SA_OFFSET_VALID BIT(13) + +#define RX_MSDU_END_INFO4_TCP_FLAG GENMASK(8, 0) +#define RX_MSDU_END_INFO4_LRO_ELIGIBLE BIT(9) + +#define RX_MSDU_END_INFO5_SA_IDX_TIMEOUT BIT(0) +#define RX_MSDU_END_INFO5_DA_IDX_TIMEOUT BIT(1) +#define RX_MSDU_END_INFO5_TO_DS BIT(2) +#define RX_MSDU_END_INFO5_TID GENMASK(6, 3) +#define RX_MSDU_END_INFO5_SA_IS_VALID BIT(7) +#define RX_MSDU_END_INFO5_DA_IS_VALID BIT(8) +#define RX_MSDU_END_INFO5_DA_IS_MCBC BIT(9) +#define RX_MSDU_END_INFO5_L3_HDR_PADDING GENMASK(11, 10) +#define RX_MSDU_END_INFO5_FIRST_MSDU BIT(12) +#define RX_MSDU_END_INFO5_LAST_MSDU BIT(13) +#define RX_MSDU_END_INFO5_FROM_DS BIT(14) +#define RX_MSDU_END_INFO5_IP_CHKSUM_FAIL_COPY BIT(15) + +#define RX_MSDU_END_INFO6_MSDU_DROP BIT(0) +#define RX_MSDU_END_INFO6_REO_DEST_IND GENMASK(5, 1) +#define RX_MSDU_END_INFO6_FLOW_IDX GENMASK(25, 6) +#define RX_MSDU_END_INFO6_USE_PPE BIT(26) +#define RX_MSDU_END_INFO6_MESH_STA GENMASK(28, 27) +#define RX_MSDU_END_INFO6_VLAN_CTAG_STRIPPED BIT(29) +#define RX_MSDU_END_INFO6_VLAN_STAG_STRIPPED BIT(30) +#define RX_MSDU_END_INFO6_FRAGMENT_FLAG BIT(31) + +#define RX_MSDU_END_INFO7_AGGR_COUNT GENMASK(7, 0) +#define RX_MSDU_END_INFO7_FLOW_AGGR_CONTN BIT(8) +#define RX_MSDU_END_INFO7_FISA_TIMEOUT BIT(9) +#define RX_MSDU_END_INFO7_TCPUDP_CSUM_FAIL_CPY BIT(10) +#define RX_MSDU_END_INFO7_MSDU_LIMIT_ERROR BIT(11) +#define RX_MSDU_END_INFO7_FLOW_IDX_TIMEOUT BIT(12) +#define RX_MSDU_END_INFO7_FLOW_IDX_INVALID BIT(13) +#define RX_MSDU_END_INFO7_CCE_MATCH BIT(14) +#define RX_MSDU_END_INFO7_AMSDU_PARSER_ERR BIT(15) + +#define RX_MSDU_END_INFO8_KEY_ID GENMASK(7, 0) + +#define RX_MSDU_END_INFO9_SERVICE_CODE GENMASK(14, 6) +#define RX_MSDU_END_INFO9_PRIORITY_VALID BIT(15) +#define RX_MSDU_END_INFO9_INRA_BSS BIT(16) +#define RX_MSDU_END_INFO9_DEST_CHIP_ID GENMASK(18, 17) +#define RX_MSDU_END_INFO9_MCAST_ECHO BIT(19) +#define RX_MSDU_END_INFO9_WDS_LEARN_EVENT BIT(20) +#define RX_MSDU_END_INFO9_WDS_ROAM_EVENT BIT(21) +#define RX_MSDU_END_INFO9_WDS_KEEP_ALIVE_EVENT BIT(22) + +#define RX_MSDU_END_INFO10_MSDU_LENGTH GENMASK(13, 0) +#define RX_MSDU_END_INFO10_STBC BIT(14) +#define RX_MSDU_END_INFO10_IPSEC_ESP BIT(15) +#define 
RX_MSDU_END_INFO10_L3_OFFSET GENMASK(22, 16) +#define RX_MSDU_END_INFO10_IPSEC_AH BIT(23) +#define RX_MSDU_END_INFO10_L4_OFFSET GENMASK(31, 24) + +#define RX_MSDU_END_INFO11_MSDU_NUMBER GENMASK(7, 0) +#define RX_MSDU_END_INFO11_DECAP_FORMAT GENMASK(9, 8) +#define RX_MSDU_END_INFO11_IPV4 BIT(10) +#define RX_MSDU_END_INFO11_IPV6 BIT(11) +#define RX_MSDU_END_INFO11_TCP BIT(12) +#define RX_MSDU_END_INFO11_UDP BIT(13) +#define RX_MSDU_END_INFO11_IP_FRAG BIT(14) +#define RX_MSDU_END_INFO11_TCP_ONLY_ACK BIT(15) +#define RX_MSDU_END_INFO11_DA_IS_BCAST_MCAST BIT(16) +#define RX_MSDU_END_INFO11_SEL_TOEPLITZ_HASH GENMASK(18, 17) +#define RX_MSDU_END_INFO11_IP_FIXED_HDR_VALID BIT(19) +#define RX_MSDU_END_INFO11_IP_EXTN_HDR_VALID BIT(20) +#define RX_MSDU_END_INFO11_IP_TCP_UDP_HDR_VALID BIT(21) +#define RX_MSDU_END_INFO11_MESH_CTRL_PRESENT BIT(22) +#define RX_MSDU_END_INFO11_LDPC BIT(23) +#define RX_MSDU_END_INFO11_IP4_IP6_NXT_HDR GENMASK(31, 24) + +#define RX_MSDU_END_INFO12_USER_RSSI GENMASK(7, 0) +#define RX_MSDU_END_INFO12_PKT_TYPE GENMASK(11, 8) +#define RX_MSDU_END_INFO12_SGI GENMASK(13, 12) +#define RX_MSDU_END_INFO12_RATE_MCS GENMASK(17, 14) +#define RX_MSDU_END_INFO12_RECV_BW GENMASK(20, 18) +#define RX_MSDU_END_INFO12_RECEPTION_TYPE GENMASK(23, 21) +#define RX_MSDU_END_INFO12_MIMO_SS_BITMAP GENMASK(30, 24) +#define RX_MSDU_END_INFO12_MIMO_DONE_COPY BIT(31) + +#define RX_MSDU_END_INFO13_FIRST_MPDU BIT(0) +#define RX_MSDU_END_INFO13_MCAST_BCAST BIT(2) +#define RX_MSDU_END_INFO13_AST_IDX_NOT_FOUND BIT(3) +#define RX_MSDU_END_INFO13_AST_IDX_TIMEDOUT BIT(4) +#define RX_MSDU_END_INFO13_POWER_MGMT BIT(5) +#define RX_MSDU_END_INFO13_NON_QOS BIT(6) +#define RX_MSDU_END_INFO13_NULL_DATA BIT(7) +#define RX_MSDU_END_INFO13_MGMT_TYPE BIT(8) +#define RX_MSDU_END_INFO13_CTRL_TYPE BIT(9) +#define RX_MSDU_END_INFO13_MORE_DATA BIT(10) +#define RX_MSDU_END_INFO13_EOSP BIT(11) +#define RX_MSDU_END_INFO13_A_MSDU_ERROR BIT(12) +#define RX_MSDU_END_INFO13_ORDER BIT(14) +#define RX_MSDU_END_INFO13_WIFI_PARSER_ERR BIT(15) +#define RX_MSDU_END_INFO13_OVERFLOW_ERR BIT(16) +#define RX_MSDU_END_INFO13_MSDU_LEN_ERR BIT(17) +#define RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL BIT(18) +#define RX_MSDU_END_INFO13_IP_CKSUM_FAIL BIT(19) +#define RX_MSDU_END_INFO13_SA_IDX_INVALID BIT(20) +#define RX_MSDU_END_INFO13_DA_IDX_INVALID BIT(21) +#define RX_MSDU_END_INFO13_AMSDU_ADDR_MISMATCH BIT(22) +#define RX_MSDU_END_INFO13_RX_IN_TX_DECRYPT_BYP BIT(23) +#define RX_MSDU_END_INFO13_ENCRYPT_REQUIRED BIT(24) +#define RX_MSDU_END_INFO13_DIRECTED BIT(25) +#define RX_MSDU_END_INFO13_BUFFER_FRAGMENT BIT(26) +#define RX_MSDU_END_INFO13_MPDU_LEN_ERR BIT(27) +#define RX_MSDU_END_INFO13_TKIP_MIC_ERR BIT(28) +#define RX_MSDU_END_INFO13_DECRYPT_ERR BIT(29) +#define RX_MSDU_END_INFO13_UNDECRYPT_FRAME_ERR BIT(30) +#define RX_MSDU_END_INFO13_FCS_ERR BIT(31) + +#define RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE GENMASK(12, 10) +#define RX_MSDU_END_INFO14_RX_BITMAP_NOT_UPDED BIT(13) +#define RX_MSDU_END_INFO14_MSDU_DONE BIT(31) + +struct rx_msdu_end_qcn9274 { + __le16 info0; + __le16 phy_ppdu_id; + __le16 ip_hdr_cksum; + __le16 info1; + __le16 info2; + __le16 cumulative_l3_checksum; + __le32 rule_indication0; + __le32 ipv6_options_crc; + __le16 info3; + __le16 l3_type; + __le32 rule_indication1; + __le32 tcp_seq_num; + __le32 tcp_ack_num; + __le16 info4; + __le16 window_size; + __le16 sa_sw_peer_id; + __le16 info5; + __le16 sa_idx; + __le16 da_idx_or_sw_peer_id; + __le32 info6; + __le32 fse_metadata; + __le16 cce_metadata; + __le16 tcp_udp_cksum; + __le16 
info7; + __le16 cumulative_ip_length; + __le32 info8; + __le32 info9; + __le32 info10; + __le32 info11; + __le16 vlan_ctag_ci; + __le16 vlan_stag_ci; + __le32 peer_meta_data; + __le32 info12; + __le32 flow_id_toeplitz; + __le32 ppdu_start_timestamp_63_32; + __le32 phy_meta_data; + __le32 ppdu_start_timestamp_31_0; + __le32 toeplitz_hash_2_or_4; + __le16 res0; + __le16 sa_15_0; + __le32 sa_47_16; + __le32 info13; + __le32 info14; +} __packed; + +/* rx_msdu_end + * + * rxpcu_mpdu_filter_in_category + * Field indicates what the reason was that this mpdu frame + * was allowed to come into the receive path by rxpcu. Values + * are defined in enum %RX_DESC_RXPCU_FILTER_*. + * + * sw_frame_group_id + * SW processes frames based on certain classifications. Values + * are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*. + * + * phy_ppdu_id + * A ppdu counter value that PHY increments for every PPDU + * received. The counter value wraps around. + * + * ip_hdr_cksum + * This can include the IP header checksum or the pseudo + * header checksum used by TCP/UDP checksum. + * + * reported_mpdu_length + * MPDU length before decapsulation. Only valid when first_msdu is + * set. This field is taken directly from the length field of the + * A-MPDU delimiter or the preamble length field for non-A-MPDU + * frames. + * + * cce_super_rule + * Indicates the super filter rule. + * + * cce_classify_not_done_truncate + * Classification failed due to truncated frame. + * + * cce_classify_not_done_cce_dis + * Classification failed due to CCE global disable + * + * cumulative_l3_checksum + * FISA: IP header checksum including the total MSDU length + * that is part of this flow aggregated so far, reported if + * 'RXOLE_R0_FISA_CTRL.CHKSUM_CUM_IP_LEN_EN' is set + * + * rule_indication + * Bitmap indicating which of the rules have matched. + * + * ipv6_options_crc + * 32 bit CRC computed out of IP v6 extension headers. + * + * da_offset + * Offset into MSDU buffer for DA. + * + * sa_offset + * Offset into MSDU buffer for SA. + * + * da_offset_valid + * da_offset field is valid. This will be set to 0 in case + * of a dynamic A-MSDU when DA is compressed. + * + * sa_offset_valid + * sa_offset field is valid. This will be set to 0 in case + * of a dynamic A-MSDU when SA is compressed. + * + * l3_type + * The 16-bit type value indicating the type of L3 layer + * extracted from LLC/SNAP, set to zero if SNAP is not + * available. + * + * tcp_seq_number + * TCP sequence number. + * + * tcp_ack_number + * TCP acknowledge number. + * + * tcp_flag + * TCP flags {NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN}. + * + * lro_eligible + * Computed out of TCP and IP fields to indicate that this + * MSDU is eligible for LRO. + * + * window_size + * TCP receive window size. + * + * sa_sw_peer_id + * sw_peer_id from the address search entry corresponding to the + * source address of the MSDU. + * + * sa_idx_timeout + * Indicates an unsuccessful MAC source address search due to the + * expiring of the search timer. + * + * da_idx_timeout + * Indicates an unsuccessful MAC destination address search due to + * the expiring of the search timer. + * + * to_ds + * Set if the to DS bit is set in the frame control. + * + * tid + * TID field in the QoS control field + * + * sa_is_valid + * Indicates that OLE found a valid SA entry. + * + * da_is_valid + * Indicates that OLE found a valid DA entry. + * + * da_is_mcbc + * Field only valid if da_is_valid is set. Indicates the DA address + * was a Multicast or Broadcast address.
+ * + * l3_header_padding + * Number of bytes padded to make sure that the L3 header will + * always start on a Dword boundary. + * + * first_msdu + * Indicates the first MSDU of A-MSDU. If both first_msdu and + * last_msdu are set in the MSDU then this is a non-aggregated MSDU + * frame: normal MPDU. Interior MSDU in an A-MSDU shall have both + * first_msdu and last_msdu bits set to 0. + * + * last_msdu + * Indicates the last MSDU of the A-MSDU. MPDU end status is only + * valid when last_msdu is set. + * + * fr_ds + * Set if the from DS bit is set in the frame control. + * + * ip_chksum_fail_copy + * Indicates that the computed checksum did not match the + * checksum in the IP header. + * + * sa_idx + * The offset in the address table which matches the MAC source + * address. + * + * da_idx_or_sw_peer_id + * Based on a register configuration in RXOLE, this field will + * contain: + * The offset in the address table which matches the MAC destination + * address + * OR: + * sw_peer_id from the address search entry corresponding to + * the destination address of the MSDU + * + * msdu_drop + * REO shall drop this MSDU and not forward it to any other ring. + * + * reo_destination_indication + * The id of the reo exit ring where the msdu frame shall be pushed + * after (MPDU level) reordering has finished. Values are defined + * in enum %HAL_RX_MSDU_DESC_REO_DEST_IND_*. + * + * flow_idx + * Flow table index. + * + * use_ppe + * Indicates to RXDMA to ignore the REO_destination_indication + * and use a programmed value corresponding to the REO2PPE + * ring + * + * mesh_sta + * When set, this is a Mesh (11s) STA. + * + * vlan_ctag_stripped + * Set by RXOLE if it stripped 4-bytes of C-VLAN Tag from the + * packet + * + * vlan_stag_stripped + * Set by RXOLE if it stripped 4-bytes of S-VLAN Tag from the + * packet + * + * fragment_flag + * Indicates that this is an 802.11 fragment frame. This is + * set when either the more_frag bit is set in the frame control + * or the fragment number is not zero. Only set when first_msdu + * is set. + * + * fse_metadata + * FSE related meta data. + * + * cce_metadata + * CCE related meta data. + * + * tcp_udp_chksum + * The value of the computed TCP/UDP checksum. A mode bit + * selects whether this checksum is the full checksum or the + * partial checksum which does not include the pseudo header. + * + * aggregation_count + * Number of MSDUs aggregated so far + * + * flow_aggregation_continuation + * To indicate that this MSDU can be aggregated with + * the previous packet with the same flow id + * + * fisa_timeout + * To indicate that the aggregation has restarted for + * this flow due to timeout + * + * tcp_udp_chksum_fail + * Indicates that the computed checksum (tcp_udp_chksum) did + * not match the checksum in the TCP/UDP header. + * + * msdu_limit_error + * Indicates that the MSDU threshold was exceeded and thus all the + * rest of the MSDUs will not be scattered and will not be + * decapsulated but will be DMA'ed in RAW format as a single MSDU. + * + * flow_idx_timeout + * Indicates an unsuccessful flow search due to the expiring of + * the search timer. + * + * flow_idx_invalid + * flow id is not valid. + * + * cce_match + * Indicates that this status has a corresponding MSDU that + * requires FW processing. The OLE will have classification + * ring mask registers which will indicate the ring(s) for + * packets and descriptors which need FW attention. + * + * amsdu_parser_error + * A-MSDU could not be properly de-aggregated.
+ * + * cumulative_ip_length + * Total MSDU length that is part of this flow aggregated + * so far + * + * key_id + * The key ID octet from the IV. Only valid when first_msdu is set. + * + * service_code + * Opaque service code between PPE and Wi-Fi + * + * priority_valid + * This field gets passed on by REO to PPE in the EDMA descriptor + * + * intra_bss + * This packet needs intra-BSS routing by SW as the 'vdev_id' + * for the destination is the same as the 'vdev_id' (from 'RX_MPDU_PCU_START') + * that this MSDU was received on. + * + * dest_chip_id + * If intra_bss is set, copied by RXOLE from 'ADDR_SEARCH_ENTRY' + * to support intra-BSS routing with multi-chip multi-link + * operation. This indicates into which chip's TCL the packet should be + * queued + * + * multicast_echo + * If set, this packet is a multicast echo, i.e. the DA is + * multicast and Rx OLE SA search with mcast_echo_check = 1 + * passed. RXDMA should release such packets to WBM. + * + * wds_learning_event + * If set, this packet has an SA search failure with WDS learning + * enabled for the peer. RXOLE should route this TLV to the + * RXDMA0 status ring to notify FW. + * + * wds_roaming_event + * If set, this packet's SA 'Sw_peer_id' mismatches the 'Sw_peer_id' + * of the peer through which the packet was received, indicating + * the SA node has roamed. RXOLE should route this TLV to + * the RXDMA0 status ring to notify FW. + * + * wds_keep_alive_event + * If set, the AST timestamp for this packet's SA is older + * than the current timestamp by more than a threshold programmed + * in RXOLE. RXOLE should route this TLV to the RXDMA0 status + * ring to notify FW to keep the AST entry for the SA alive. + * + * msdu_length + * MSDU length in bytes after decapsulation. + * This field is still valid for MPDU frames without A-MSDU. + * It still represents MSDU length after decapsulation + * + * stbc + * When set, use STBC transmission rates. + * + * ipsec_esp + * Set if IPv4/v6 packet is using IPsec ESP. + * + * l3_offset + * Depending upon mode bit, this field either indicates the + * L3 offset in bytes from the start of the RX_HEADER or the IP + * offset in bytes from the start of the packet after + * decapsulation. The latter is only valid if ipv4_proto or + * ipv6_proto is set. + * + * ipsec_ah + * Set if IPv4/v6 packet is using IPsec AH + * + * l4_offset + * Depending upon mode bit, this field either indicates the + * L4 offset in bytes from the start of RX_HEADER (only valid + * if either ipv4_proto or ipv6_proto is set to 1) or indicates + * the offset in bytes to the start of TCP or UDP header from + * the start of the IP header after decapsulation (Only valid if + * tcp_proto or udp_proto is set). The value 0 indicates that + * the offset is longer than 127 bytes. + * + * msdu_number + * Indicates the MSDU number within an MPDU. This value is + * reset to zero at the start of each MPDU. If the number of + * MSDUs exceeds 255 this number will wrap using modulo 256. + * + * decap_type + * Indicates the format after decapsulation. Values are defined in + * enum %MPDU_START_DECAP_TYPE_*. + * + * ipv4_proto + * Set if L2 layer indicates IPv4 protocol. + * + * ipv6_proto + * Set if L2 layer indicates IPv6 protocol. + * + * tcp_proto + * Set if the ipv4_proto or ipv6_proto are set and the IP protocol + * indicates TCP. + * + * udp_proto + * Set if the ipv4_proto or ipv6_proto are set and the IP protocol + * indicates UDP. + * + * ip_frag + * Indicates that either the IP More frag bit is set or IP frag + * number is non-zero.
If set indicates that this is a fragmented + * IP packet. + * + * tcp_only_ack + * Set if only the TCP Ack bit is set in the TCP flags and if + * the TCP payload is 0. + * + * da_is_bcast_mcast + * The destination address is broadcast or multicast. + * + * toeplitz_hash + * Actual chosen Hash. + * 0 - Toeplitz hash of 2-tuple (IP source address, IP + * destination address) + * 1 - Toeplitz hash of 4-tuple (IP source address, + * IP destination address, L4 (TCP/UDP) source port, + * L4 (TCP/UDP) destination port) + * 2 - Toeplitz hash of flow_id + * 3 - Zero is used + * + * ip_fixed_header_valid + * Fixed 20-byte IPv4 header or 40-byte IPv6 header parsed + * fully within first 256 bytes of the packet + * + * ip_extn_header_valid + * IPv4/IPv6 header, including IPv4 options and + * recognizable extension headers parsed fully within first 256 + * bytes of the packet + * + * tcp_udp_header_valid + * Fixed 20-byte TCP (excluding TCP options) or 8-byte UDP + * header parsed fully within first 256 bytes of the packet + * + * mesh_control_present + * When set, this MSDU includes the 'Mesh Control' field + * + * ldpc + * When set, indicates that LDPC coding was used. + * + * ip4_protocol_ip6_next_header + * For IPv4, this is the 8 bit protocol field. For IPv6 this + * is the 8 bit next_header field. + * + * vlan_ctag_ci + * 2 bytes of C-VLAN Tag Control Information from WHO_L2_LLC + * + * vlan_stag_ci + * 2 bytes of S-VLAN Tag Control Information from WHO_L2_LLC + * in case of double VLAN + * + * peer_meta_data + * Meta data that SW has programmed in the Peer table entry + * of the transmitting STA. + * + * user_rssi + * RSSI for this user + * + * pkt_type + * Values are defined in enum %RX_MSDU_START_PKT_TYPE_*. + * + * sgi + * Field only valid when pkt type is HT, VHT or HE. Values are + * defined in enum %RX_MSDU_START_SGI_*. + * + * rate_mcs + * MCS Rate used. + * + * receive_bandwidth + * Full receive Bandwidth. Values are defined in enum + * %RX_MSDU_START_RECV_BW_*. + * + * reception_type + * Indicates what type of reception this is; values are defined in + * enum %RX_MSDU_START_RECEPTION_TYPE_*. + * + * mimo_ss_bitmap + * Field only valid when + * Reception_type is RX_MSDU_START_RECEPTION_TYPE_DL_MU_MIMO or + * RX_MSDU_START_RECEPTION_TYPE_DL_MU_OFDMA_MIMO. + * + * Bitmap, with each bit indicating if the related spatial + * stream is used for this STA + * + * LSB related to SS 0 + * + * 0 - spatial stream not used for this reception + * 1 - spatial stream used for this reception + * + * msdu_done_copy + * If set indicates that the RX packet data, RX header data, + * RX PPDU start descriptor, RX MPDU start/end descriptor, + * RX MSDU start/end descriptors and RX Attention descriptor + * are all valid. This bit is in the last 64 bits of the + * descriptor, which future hardware is expected to subscribe. + * + * flow_id_toeplitz + * Toeplitz hash of 5-tuple + * {IP source address, IP destination address, IP source port, IP + * destination port, L4 protocol} in case of non-IPSec. + * + * In case of IPSec - Toeplitz hash of 4-tuple + * {IP source address, IP destination address, SPI, L4 protocol} + * + * The relevant Toeplitz key registers are provided in RxOLE's + * instance of common parser module. These registers are separate + * from the Toeplitz keys used by ASE/FSE modules inside RxOLE. + * The actual value will be passed on from common parser module + * to RxOLE in one of the WHO_* TLVs. + * + * ppdu_start_timestamp + * Timestamp that indicates when the PPDU that contained this MPDU + * started on the medium.
+ * + * phy_meta_data + * SW programmed Meta data provided by the PHY. Can be used for SW + * to indicate the channel the device is on. + * + * toeplitz_hash_2_or_4 + * Controlled by multiple RxOLE registers for TCP/UDP over + * IPv4/IPv6 - Either, Toeplitz hash computed over 2-tuple + * IPv4 or IPv6 src/dest addresses is reported; or, Toeplitz + * hash computed over 4-tuple IPv4 or IPv6 src/dest addresses + * and src/dest ports is reported. The Flow_id_toeplitz hash + * can also be reported here. Usually the hash reported here + * is the one used for hash-based REO routing (see use_flow_id_toeplitz_clfy + * in 'RXPT_CLASSIFY_INFO'). + * + * sa + * Source MAC address + * + * first_mpdu + * Indicates the first MSDU of the PPDU. If both first_mpdu + * and last_mpdu are set in the MSDU then this is not an + * A-MPDU frame but a stand-alone MPDU. Interior MPDU in an + * A-MPDU shall have both first_mpdu and last_mpdu bits set to + * 0. The PPDU start status will only be valid when this bit + * is set. + * + * mcast_bcast + * Multicast / broadcast indicator. Only set when the MAC + * address 1 bit 0 is set indicating mcast/bcast and the BSSID + * matches one of the 4 BSSID registers. Only set when + * first_msdu is set. + * + * ast_index_not_found + * Only valid when first_msdu is set. Indicates no AST matching + * entries within the max search count. + * + * ast_index_timeout + * Only valid when first_msdu is set. Indicates an unsuccessful + * search in the address search table due to timeout. + * + * power_mgmt + * Power management bit set in the 802.11 header. Only set + * when first_msdu is set. + * + * non_qos + * Set if packet is not a non-QoS data frame. Only set when + * first_msdu is set. + * + * null_data + * Set if frame type indicates either null data or QoS null + * data format. Only set when first_msdu is set. + * + * mgmt_type + * Set if packet is a management packet. Only set when + * first_msdu is set. + * + * ctrl_type + * Set if packet is a control packet. Only set when first_msdu + * is set. + * + * more_data + * Set if more bit in frame control is set. Only set when + * first_msdu is set. + * + * eosp + * Set if the EOSP (end of service period) bit in the QoS + * control field is set. Only set when first_msdu is set. + * + * a_msdu_error + * Set if number of MSDUs in A-MSDU is above a threshold or if the + * size of the MSDU is invalid. This receive buffer will contain + * all of the remainder of MSDUs in this MPDU w/o decapsulation. + * + * order + * Set if the order bit in the frame control is set. Only + * set when first_msdu is set. + * + * wifi_parser_error + * Indicates that the WiFi frame has one of the following errors + * + * overflow_err + * RXPCU Receive FIFO ran out of space to receive the full MPDU. + * Therefore this MPDU is terminated early and is thus corrupted. + * + * This MPDU will not be ACKed. + * + * RXPCU might still be able to correctly receive the following + * MPDUs in the PPDU if enough fifo space became available in time. + * + * msdu_length_err + * Indicates that the MSDU length from the 802.3 encapsulated + * length field extends beyond the MPDU boundary. + * + * tcp_udp_chksum_fail + * Indicates that the computed checksum (tcp_udp_chksum) did + * not match the checksum in the TCP/UDP header. + * + * ip_chksum_fail + * Indicates that the computed checksum did not match the + * checksum in the IP header. + * + * sa_idx_invalid + * Indicates no matching entry was found in the address search + * table for the source MAC address.
+ * + * da_idx_invalid + * Indicates no matching entry was found in the address search + * table for the destination MAC address. + * + * amsdu_addr_mismatch + * Indicates that an A-MSDU with 'from DS = 0' had an SA mismatching + * TA or an A-MSDU with 'to DS = 0' had a DA mismatching RA + * + * rx_in_tx_decrypt_byp + * Indicates that RX packet is not decrypted as Crypto is busy + * with TX packet processing. + * + * encrypt_required + * Indicates that this data type frame is not encrypted even if + * the policy for this MPDU requires encryption as indicated in + * the peer table key type. + * + * directed + * MPDU is a directed packet which means that the RA matched + * our STA addresses. In proxySTA it means that the TA matched + * an entry in our address search table with the corresponding + * 'no_ack' bit in the address search entry cleared. + * + * buffer_fragment + * Indicates that at least one of the rx buffers has been + * fragmented. If set the FW should look at the rx_frag_info + * descriptor described below. + * + * mpdu_length_err + * Indicates that the MPDU was prematurely terminated + * resulting in a truncated MPDU. Don't trust the MPDU length + * field. + * + * tkip_mic_err + * Indicates that the MPDU Michael integrity check failed + * + * decrypt_err + * Indicates that the MPDU decrypt integrity check failed + * + * fcs_err + * Indicates that the MPDU FCS check failed + * + * flow_idx_timeout + * Indicates an unsuccessful flow search due to the expiring of + * the search timer. + * + * flow_idx_invalid + * flow id is not valid. + * + * decrypt_status_code + * Field provides insight into the decryption performed. Values + * are defined in enum %RX_DESC_DECRYPT_STATUS_CODE_*. + * + * rx_bitmap_not_updated + * Frame is received, but RXPCU could not update the receive bitmap + * due to (temporary) fifo constraints. + * + * msdu_done + * If set indicates that the RX packet data, RX header data, RX + * PPDU start descriptor, RX MPDU start/end descriptor, RX MSDU + * start/end descriptors and RX Attention descriptor are all + * valid. This bit must be in the last octet of the + * descriptor.
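+ *
+ * Illustrative sketch (not part of this header): once the hardware has
+ * completed a descriptor, the host can pull the fields above out of it
+ * with the INFO masks defined earlier. 'desc' is assumed to point at a
+ * DMA-synced struct hal_rx_desc_qcn9274 (defined below):
+ *
+ *	bool done = le32_get_bits(desc->msdu_end.info14,
+ *				  RX_MSDU_END_INFO14_MSDU_DONE);
+ *	u16 msdu_len = le32_get_bits(desc->msdu_end.info10,
+ *				     RX_MSDU_END_INFO10_MSDU_LENGTH);
+ *	u8 l3_pad = le16_get_bits(desc->msdu_end.info5,
+ *				  RX_MSDU_END_INFO5_L3_HDR_PADDING);
+ *
+ * Per the msdu_done description above, msdu_done must read back as 1
+ * before any of the other words are trusted.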
+ * + */ + +/* TODO: Move to compact TLV approach + * By default these TLVs are not aligned to 128b boundary + * Need to remove unused qwords and make them compact/aligned + */ +struct hal_rx_desc_qcn9274 { + struct rx_msdu_end_qcn9274 msdu_end; + struct rx_mpdu_start_qcn9274 mpdu_start; + u8 msdu_payload[]; +} __packed; + +#define RX_BE_PADDING0_BYTES 8 +#define RX_BE_PADDING1_BYTES 8 + +#define HAL_RX_BE_PKT_HDR_TLV_LEN 112 + +struct rx_pkt_hdr_tlv { + __le64 tag; + __le64 phy_ppdu_id; + u8 rx_pkt_hdr[HAL_RX_BE_PKT_HDR_TLV_LEN]; +}; + +struct hal_rx_desc_wcn7850 { + __le64 msdu_end_tag; + struct rx_msdu_end_qcn9274 msdu_end; + u8 rx_padding0[RX_BE_PADDING0_BYTES]; + __le64 mpdu_start_tag; + struct rx_mpdu_start_qcn9274 mpdu_start; + struct rx_pkt_hdr_tlv pkt_hdr_tlv; + u8 msdu_payload[]; +}; + +struct hal_rx_desc { + union { + struct hal_rx_desc_qcn9274 qcn9274; + struct hal_rx_desc_wcn7850 wcn7850; + } u; +} __packed; + +#define MAX_USER_POS 8 +#define MAX_MU_GROUP_ID 64 +#define MAX_MU_GROUP_SHOW 16 +#define MAX_MU_GROUP_LENGTH (6 * MAX_MU_GROUP_SHOW) + +#define HAL_RX_RU_ALLOC_TYPE_MAX 6 +#define RU_26 1 +#define RU_52 2 +#define RU_106 4 +#define RU_242 9 +#define RU_484 18 +#define RU_996 37 + +#endif /* ATH12K_RX_DESC_H */ diff --git a/drivers/net/wireless/ath/ath12k/trace.c b/drivers/net/wireless/ath/ath12k/trace.c new file mode 100644 index 000000000000..0d0edf4204b7 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/trace.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include <linux/module.h> + +#define CREATE_TRACE_POINTS +#include "trace.h" diff --git a/drivers/net/wireless/ath/ath12k/trace.h b/drivers/net/wireless/ath/ath12k/trace.h new file mode 100644 index 000000000000..f72096684b74 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/trace.h @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) + +#include <linux/tracepoint.h> +#include "core.h" + +#define _TRACE_H_ + +/* create empty functions when tracing is disabled */ +#if !defined(CONFIG_ATH12K_TRACING) || defined(__CHECKER__) +#undef TRACE_EVENT +#define TRACE_EVENT(name, proto, ...) 
\ +static inline void trace_ ## name(proto) {} +#endif /* !CONFIG_ATH12K_TRACING || __CHECKER__ */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM ath12k + +TRACE_EVENT(ath12k_htt_pktlog, + TP_PROTO(struct ath12k *ar, const void *buf, u16 buf_len, + u32 pktlog_checksum), + + TP_ARGS(ar, buf, buf_len, pktlog_checksum), + + TP_STRUCT__entry( + __string(device, dev_name(ar->ab->dev)) + __string(driver, dev_driver_string(ar->ab->dev)) + __field(u16, buf_len) + __field(u32, pktlog_checksum) + __dynamic_array(u8, pktlog, buf_len) + ), + + TP_fast_assign( + __assign_str(device, dev_name(ar->ab->dev)); + __assign_str(driver, dev_driver_string(ar->ab->dev)); + __entry->buf_len = buf_len; + __entry->pktlog_checksum = pktlog_checksum; + memcpy(__get_dynamic_array(pktlog), buf, buf_len); + ), + + TP_printk( + "%s %s size %u pktlog_checksum %u", + __get_str(driver), + __get_str(device), + __entry->buf_len, + __entry->pktlog_checksum + ) +); + +TRACE_EVENT(ath12k_htt_ppdu_stats, + TP_PROTO(struct ath12k *ar, const void *data, size_t len), + + TP_ARGS(ar, data, len), + + TP_STRUCT__entry( + __string(device, dev_name(ar->ab->dev)) + __string(driver, dev_driver_string(ar->ab->dev)) + __field(u16, len) + __field(u32, info) + __field(u32, sync_tstmp_lo_us) + __field(u32, sync_tstmp_hi_us) + __field(u32, mlo_offset_lo) + __field(u32, mlo_offset_hi) + __field(u32, mlo_offset_clks) + __field(u32, mlo_comp_clks) + __field(u32, mlo_comp_timer) + __dynamic_array(u8, ppdu, len) + ), + + TP_fast_assign( + __assign_str(device, dev_name(ar->ab->dev)); + __assign_str(driver, dev_driver_string(ar->ab->dev)); + __entry->len = len; + __entry->info = ar->pdev->timestamp.info; + __entry->sync_tstmp_lo_us = ar->pdev->timestamp.sync_timestamp_lo_us; + __entry->sync_tstmp_hi_us = ar->pdev->timestamp.sync_timestamp_hi_us; + __entry->mlo_offset_lo = ar->pdev->timestamp.mlo_offset_lo; + __entry->mlo_offset_hi = ar->pdev->timestamp.mlo_offset_hi; + __entry->mlo_offset_clks = ar->pdev->timestamp.mlo_offset_clks; + __entry->mlo_comp_clks = ar->pdev->timestamp.mlo_comp_clks; + __entry->mlo_comp_timer = ar->pdev->timestamp.mlo_comp_timer; + memcpy(__get_dynamic_array(ppdu), data, len); + ), + + TP_printk( + "%s %s ppdu len %d", + __get_str(driver), + __get_str(device), + __entry->len + ) +); + +TRACE_EVENT(ath12k_htt_rxdesc, + TP_PROTO(struct ath12k *ar, const void *data, size_t type, size_t len), + + TP_ARGS(ar, data, type, len), + + TP_STRUCT__entry( + __string(device, dev_name(ar->ab->dev)) + __string(driver, dev_driver_string(ar->ab->dev)) + __field(u16, len) + __field(u16, type) + __field(u32, info) + __field(u32, sync_tstmp_lo_us) + __field(u32, sync_tstmp_hi_us) + __field(u32, mlo_offset_lo) + __field(u32, mlo_offset_hi) + __field(u32, mlo_offset_clks) + __field(u32, mlo_comp_clks) + __field(u32, mlo_comp_timer) + __dynamic_array(u8, rxdesc, len) + ), + + TP_fast_assign( + __assign_str(device, dev_name(ar->ab->dev)); + __assign_str(driver, dev_driver_string(ar->ab->dev)); + __entry->len = len; + __entry->type = type; + __entry->info = ar->pdev->timestamp.info; + __entry->sync_tstmp_lo_us = ar->pdev->timestamp.sync_timestamp_lo_us; + __entry->sync_tstmp_hi_us = ar->pdev->timestamp.sync_timestamp_hi_us; + __entry->mlo_offset_lo = ar->pdev->timestamp.mlo_offset_lo; + __entry->mlo_offset_hi = ar->pdev->timestamp.mlo_offset_hi; + __entry->mlo_offset_clks = ar->pdev->timestamp.mlo_offset_clks; + __entry->mlo_comp_clks = ar->pdev->timestamp.mlo_comp_clks; + __entry->mlo_comp_timer = ar->pdev->timestamp.mlo_comp_timer; + 
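/* snapshot the raw rx descriptor so it can be decoded from the trace buffer */ +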
memcpy(__get_dynamic_array(rxdesc), data, len); + ), + + TP_printk( + "%s %s rxdesc len %d", + __get_str(driver), + __get_str(device), + __entry->len + ) +); + +#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/ + +/* we don't want to use include/trace/events */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c new file mode 100644 index 000000000000..f6df14149531 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/wmi.c @@ -0,0 +1,6600 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ +#include <linux/skbuff.h> +#include <linux/ctype.h> +#include <net/mac80211.h> +#include <net/cfg80211.h> +#include <linux/completion.h> +#include <linux/if_ether.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/uuid.h> +#include <linux/time.h> +#include <linux/of.h> +#include "core.h" +#include "debug.h" +#include "mac.h" +#include "hw.h" +#include "peer.h" + +struct ath12k_wmi_svc_ready_parse { + bool wmi_svc_bitmap_done; +}; + +struct ath12k_wmi_dma_ring_caps_parse { + struct ath12k_wmi_dma_ring_caps_params *dma_ring_caps; + u32 n_dma_ring_caps; +}; + +struct ath12k_wmi_service_ext_arg { + u32 default_conc_scan_config_bits; + u32 default_fw_config_bits; + struct ath12k_wmi_ppe_threshold_arg ppet; + u32 he_cap_info; + u32 mpdu_density; + u32 max_bssid_rx_filters; + u32 num_hw_modes; + u32 num_phy; +}; + +struct ath12k_wmi_svc_rdy_ext_parse { + struct ath12k_wmi_service_ext_arg arg; + const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps; + const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps; + u32 n_hw_mode_caps; + u32 tot_phy_id; + struct ath12k_wmi_hw_mode_cap_params pref_hw_mode_caps; + struct ath12k_wmi_mac_phy_caps_params *mac_phy_caps; + u32 n_mac_phy_caps; + const struct ath12k_wmi_soc_hal_reg_caps_params *soc_hal_reg_caps; + const struct ath12k_wmi_hal_reg_caps_ext_params *ext_hal_reg_caps; + u32 n_ext_hal_reg_caps; + struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse; + bool hw_mode_done; + bool mac_phy_done; + bool ext_hal_reg_done; + bool mac_phy_chainmask_combo_done; + bool mac_phy_chainmask_cap_done; + bool oem_dma_ring_cap_done; + bool dma_ring_cap_done; +}; + +struct ath12k_wmi_svc_rdy_ext2_parse { + struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse; + bool dma_ring_cap_done; +}; + +struct ath12k_wmi_rdy_parse { + u32 num_extra_mac_addr; +}; + +struct ath12k_wmi_dma_buf_release_arg { + struct ath12k_wmi_dma_buf_release_fixed_params fixed; + const struct ath12k_wmi_dma_buf_release_entry_params *buf_entry; + const struct ath12k_wmi_dma_buf_release_meta_data_params *meta_data; + u32 num_buf_entry; + u32 num_meta; + bool buf_entry_done; + bool meta_data_done; +}; + +struct ath12k_wmi_tlv_policy { + size_t min_len; +}; + +struct wmi_tlv_mgmt_rx_parse { + const struct ath12k_wmi_mgmt_rx_params *fixed; + const u8 *frame_buf; + bool frame_buf_done; +}; + +static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = { + [WMI_TAG_ARRAY_BYTE] = { .min_len = 0 }, + [WMI_TAG_ARRAY_UINT32] = { .min_len = 0 }, + [WMI_TAG_SERVICE_READY_EVENT] = { + .min_len = sizeof(struct wmi_service_ready_event) }, + [WMI_TAG_SERVICE_READY_EXT_EVENT] = { + .min_len = 
sizeof(struct wmi_service_ready_ext_event) }, + [WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS] = { + .min_len = sizeof(struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params) }, + [WMI_TAG_SOC_HAL_REG_CAPABILITIES] = { + .min_len = sizeof(struct ath12k_wmi_soc_hal_reg_caps_params) }, + [WMI_TAG_VDEV_START_RESPONSE_EVENT] = { + .min_len = sizeof(struct wmi_vdev_start_resp_event) }, + [WMI_TAG_PEER_DELETE_RESP_EVENT] = { + .min_len = sizeof(struct wmi_peer_delete_resp_event) }, + [WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT] = { + .min_len = sizeof(struct wmi_bcn_tx_status_event) }, + [WMI_TAG_VDEV_STOPPED_EVENT] = { + .min_len = sizeof(struct wmi_vdev_stopped_event) }, + [WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT] = { + .min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) }, + [WMI_TAG_MGMT_RX_HDR] = { + .min_len = sizeof(struct ath12k_wmi_mgmt_rx_params) }, + [WMI_TAG_MGMT_TX_COMPL_EVENT] = { + .min_len = sizeof(struct wmi_mgmt_tx_compl_event) }, + [WMI_TAG_SCAN_EVENT] = { + .min_len = sizeof(struct wmi_scan_event) }, + [WMI_TAG_PEER_STA_KICKOUT_EVENT] = { + .min_len = sizeof(struct wmi_peer_sta_kickout_event) }, + [WMI_TAG_ROAM_EVENT] = { + .min_len = sizeof(struct wmi_roam_event) }, + [WMI_TAG_CHAN_INFO_EVENT] = { + .min_len = sizeof(struct wmi_chan_info_event) }, + [WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT] = { + .min_len = sizeof(struct wmi_pdev_bss_chan_info_event) }, + [WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT] = { + .min_len = sizeof(struct wmi_vdev_install_key_compl_event) }, + [WMI_TAG_READY_EVENT] = { + .min_len = sizeof(struct ath12k_wmi_ready_event_min_params) }, + [WMI_TAG_SERVICE_AVAILABLE_EVENT] = { + .min_len = sizeof(struct wmi_service_available_event) }, + [WMI_TAG_PEER_ASSOC_CONF_EVENT] = { + .min_len = sizeof(struct wmi_peer_assoc_conf_event) }, + [WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = { + .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) }, + [WMI_TAG_HOST_SWFDA_EVENT] = { + .min_len = sizeof(struct wmi_fils_discovery_event) }, + [WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = { + .min_len = sizeof(struct wmi_probe_resp_tx_status_event) }, + [WMI_TAG_VDEV_DELETE_RESP_EVENT] = { + .min_len = sizeof(struct wmi_vdev_delete_resp_event) }, +}; + +static __le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len) +{ + return le32_encode_bits(cmd, WMI_TLV_TAG) | + le32_encode_bits(len, WMI_TLV_LEN); +} + +static __le32 ath12k_wmi_tlv_cmd_hdr(u32 cmd, u32 len) +{ + return ath12k_wmi_tlv_hdr(cmd, len - TLV_HDR_SIZE); +} + +void ath12k_wmi_init_qcn9274(struct ath12k_base *ab, + struct ath12k_wmi_resource_config_arg *config) +{ + config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS; + + if (ab->num_radios == 2) { + config->num_peers = TARGET_NUM_PEERS(DBS); + config->num_tids = TARGET_NUM_TIDS(DBS); + } else if (ab->num_radios == 3) { + config->num_peers = TARGET_NUM_PEERS(DBS_SBS); + config->num_tids = TARGET_NUM_TIDS(DBS_SBS); + } else { + /* Control should not reach here */ + config->num_peers = TARGET_NUM_PEERS(SINGLE); + config->num_tids = TARGET_NUM_TIDS(SINGLE); + } + config->num_offload_peers = TARGET_NUM_OFFLD_PEERS; + config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS; + config->num_peer_keys = TARGET_NUM_PEER_KEYS; + config->ast_skid_limit = TARGET_AST_SKID_LIMIT; + config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1; + config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1; + config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI; + config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI; + config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI; + config->rx_timeout_pri[3] = 
TARGET_RX_TIMEOUT_HI_PRI; + + if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) + config->rx_decap_mode = TARGET_DECAP_MODE_RAW; + else + config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI; + + config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS; + config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV; + config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV; + config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES; + config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS; + config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS; + config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE; + config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE; + config->num_wds_entries = TARGET_NUM_WDS_ENTRIES; + config->dma_burst_size = TARGET_DMA_BURST_SIZE; + config->rx_skip_defrag_timeout_dup_detection_check = + TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK; + config->vow_config = TARGET_VOW_CONFIG; + config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV; + config->num_msdu_desc = TARGET_NUM_MSDU_DESC; + config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD; + config->rx_batchmode = TARGET_RX_BATCHMODE; + /* Indicates that the host supports peer map v3 and unmap v2 */ + config->peer_map_unmap_version = 0x32; + config->twt_ap_pdev_count = ab->num_radios; + config->twt_ap_sta_count = 1000; +} + +void ath12k_wmi_init_wcn7850(struct ath12k_base *ab, + struct ath12k_wmi_resource_config_arg *config) +{ + config->num_vdevs = 4; + config->num_peers = 16; + config->num_tids = 32; + + config->num_offload_peers = 3; + config->num_offload_reorder_buffs = 3; + config->num_peer_keys = TARGET_NUM_PEER_KEYS; + config->ast_skid_limit = TARGET_AST_SKID_LIMIT; + config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1; + config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1; + config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI; + config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI; + config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI; + config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI; + config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI; + config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS; + config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV; + config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV; + config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES; + config->num_mcast_groups = 0; + config->num_mcast_table_elems = 0; + config->mcast2ucast_mode = 0; + config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE; + config->num_wds_entries = 0; + config->dma_burst_size = 0; + config->rx_skip_defrag_timeout_dup_detection_check = 0; + config->vow_config = TARGET_VOW_CONFIG; + config->gtk_offload_max_vdev = 2; + config->num_msdu_desc = 0x400; + config->beacon_tx_offload_max_vdev = 2; + config->rx_batchmode = TARGET_RX_BATCHMODE; + + config->peer_map_unmap_version = 0x1; + config->use_pdev_id = 1; + config->max_frag_entries = 0xa; + config->num_tdls_vdevs = 0x1; + config->num_tdls_conn_table_entries = 8; + config->num_multicast_filter_entries = 0x20; + config->num_wow_filters = 0x16; + config->num_keep_alive_pattern = 0; +} + +#define PRIMAP(_hw_mode_) \ + [_hw_mode_] = _hw_mode_##_PRI + +static const int ath12k_hw_mode_pri_map[] = { + PRIMAP(WMI_HOST_HW_MODE_SINGLE), + PRIMAP(WMI_HOST_HW_MODE_DBS), + PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE), + PRIMAP(WMI_HOST_HW_MODE_SBS), + PRIMAP(WMI_HOST_HW_MODE_DBS_SBS), + PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
+ /* keep last */ + PRIMAP(WMI_HOST_HW_MODE_MAX), +}; + +static int +ath12k_wmi_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len, + int (*iter)(struct ath12k_base *ab, u16 tag, u16 len, + const void *ptr, void *data), + void *data) +{ + const void *begin = ptr; + const struct wmi_tlv *tlv; + u16 tlv_tag, tlv_len; + int ret; + + while (len > 0) { + if (len < sizeof(*tlv)) { + ath12k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n", + ptr - begin, len, sizeof(*tlv)); + return -EINVAL; + } + + tlv = ptr; + tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG); + tlv_len = le32_get_bits(tlv->header, WMI_TLV_LEN); + ptr += sizeof(*tlv); + len -= sizeof(*tlv); + + if (tlv_len > len) { + ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n", + tlv_tag, ptr - begin, len, tlv_len); + return -EINVAL; + } + + if (tlv_tag < ARRAY_SIZE(ath12k_wmi_tlv_policies) && + ath12k_wmi_tlv_policies[tlv_tag].min_len && + ath12k_wmi_tlv_policies[tlv_tag].min_len > tlv_len) { + ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n", + tlv_tag, ptr - begin, tlv_len, + ath12k_wmi_tlv_policies[tlv_tag].min_len); + return -EINVAL; + } + + ret = iter(ab, tlv_tag, tlv_len, ptr, data); + if (ret) + return ret; + + ptr += tlv_len; + len -= tlv_len; + } + + return 0; +} + +static int ath12k_wmi_tlv_iter_parse(struct ath12k_base *ab, u16 tag, u16 len, + const void *ptr, void *data) +{ + const void **tb = data; + + if (tag < WMI_TAG_MAX) + tb[tag] = ptr; + + return 0; +} + +static int ath12k_wmi_tlv_parse(struct ath12k_base *ar, const void **tb, + const void *ptr, size_t len) +{ + return ath12k_wmi_tlv_iter(ar, ptr, len, ath12k_wmi_tlv_iter_parse, + (void *)tb); +} + +static const void ** +ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab, const void *ptr, + size_t len, gfp_t gfp) +{ + const void **tb; + int ret; + + tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp); + if (!tb) + return ERR_PTR(-ENOMEM); + + ret = ath12k_wmi_tlv_parse(ab, tb, ptr, len); + if (ret) { + kfree(tb); + return ERR_PTR(ret); + } + + return tb; +} + +static int ath12k_wmi_cmd_send_nowait(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb, + u32 cmd_id) +{ + struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb); + struct ath12k_base *ab = wmi->wmi_ab->ab; + struct wmi_cmd_hdr *cmd_hdr; + int ret; + + if (!skb_push(skb, sizeof(struct wmi_cmd_hdr))) + return -ENOMEM; + + cmd_hdr = (struct wmi_cmd_hdr *)skb->data; + cmd_hdr->cmd_id = le32_encode_bits(cmd_id, WMI_CMD_HDR_CMD_ID); + + memset(skb_cb, 0, sizeof(*skb_cb)); + ret = ath12k_htc_send(&ab->htc, wmi->eid, skb); + + if (ret) + goto err_pull; + + return 0; + +err_pull: + skb_pull(skb, sizeof(struct wmi_cmd_hdr)); + return ret; +} + +int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb, + u32 cmd_id) +{ + struct ath12k_wmi_base *wmi_sc = wmi->wmi_ab; + int ret = -EOPNOTSUPP; + + might_sleep(); + + wait_event_timeout(wmi_sc->tx_credits_wq, ({ + ret = ath12k_wmi_cmd_send_nowait(wmi, skb, cmd_id); + + if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH, &wmi_sc->ab->dev_flags)) + ret = -ESHUTDOWN; + + (ret != -EAGAIN); + }), WMI_SEND_TIMEOUT_HZ); + + if (ret == -EAGAIN) + ath12k_warn(wmi_sc->ab, "wmi command %d timeout\n", cmd_id); + + return ret; +} + +static int ath12k_pull_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle, + const void *ptr, + struct ath12k_wmi_service_ext_arg *arg) +{ + const struct wmi_service_ready_ext_event *ev = ptr; + int i; + + if (!ev) + return -EINVAL; + + 
/* Move this to host based bitmap */ + arg->default_conc_scan_config_bits = + le32_to_cpu(ev->default_conc_scan_config_bits); + arg->default_fw_config_bits = le32_to_cpu(ev->default_fw_config_bits); + arg->he_cap_info = le32_to_cpu(ev->he_cap_info); + arg->mpdu_density = le32_to_cpu(ev->mpdu_density); + arg->max_bssid_rx_filters = le32_to_cpu(ev->max_bssid_rx_filters); + arg->ppet.numss_m1 = le32_to_cpu(ev->ppet.numss_m1); + arg->ppet.ru_bit_mask = le32_to_cpu(ev->ppet.ru_info); + + for (i = 0; i < WMI_MAX_NUM_SS; i++) + arg->ppet.ppet16_ppet8_ru3_ru0[i] = + le32_to_cpu(ev->ppet.ppet16_ppet8_ru3_ru0[i]); + + return 0; +} + +static int +ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle, + struct ath12k_wmi_svc_rdy_ext_parse *svc, + u8 hw_mode_id, u8 phy_id, + struct ath12k_pdev *pdev) +{ + const struct ath12k_wmi_mac_phy_caps_params *mac_caps; + const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps = svc->hw_caps; + const struct ath12k_wmi_hw_mode_cap_params *wmi_hw_mode_caps = svc->hw_mode_caps; + const struct ath12k_wmi_mac_phy_caps_params *wmi_mac_phy_caps = svc->mac_phy_caps; + struct ath12k_band_cap *cap_band; + struct ath12k_pdev_cap *pdev_cap = &pdev->cap; + u32 phy_map; + u32 hw_idx, phy_idx = 0; + int i; + + if (!hw_caps || !wmi_hw_mode_caps || !svc->soc_hal_reg_caps) + return -EINVAL; + + for (hw_idx = 0; hw_idx < le32_to_cpu(hw_caps->num_hw_modes); hw_idx++) { + if (hw_mode_id == le32_to_cpu(wmi_hw_mode_caps[hw_idx].hw_mode_id)) + break; + + phy_map = le32_to_cpu(wmi_hw_mode_caps[hw_idx].phy_id_map); + phy_idx = fls(phy_map); + } + + if (hw_idx == le32_to_cpu(hw_caps->num_hw_modes)) + return -EINVAL; + + phy_idx += phy_id; + if (phy_id >= le32_to_cpu(svc->soc_hal_reg_caps->num_phy)) + return -EINVAL; + + mac_caps = wmi_mac_phy_caps + phy_idx; + + pdev->pdev_id = le32_to_cpu(mac_caps->pdev_id); + pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands); + pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density); + + /* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from + * band to band for a single radio, need to see how this should be + * handled. + */ + if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2G_CAP) { + pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_2g); + pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_2g); + } else if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5G_CAP) { + pdev_cap->vht_cap = le32_to_cpu(mac_caps->vht_cap_info_5g); + pdev_cap->vht_mcs = le32_to_cpu(mac_caps->vht_supp_mcs_5g); + pdev_cap->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g); + pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_5g); + pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_5g); + } else { + return -EINVAL; + } + + /* tx/rx chainmask reported from fw depends on the actual hw chains used. + * For example, for 4x4 capable macphys, the first 4 chains can be used for + * the first mac and the remaining 4 chains can be used for the second mac + * or vice-versa. In this case, tx/rx chainmask 0xf will be advertised for + * the first mac and 0xf0 will be advertised for the second mac or vice-versa. + * Compute the shift value for tx/rx chainmask which will be used to + * advertise supported ht/vht rates to + * mac80211.
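+ *
+ * For example (illustrative): a second mac that advertised
+ * rx_chain_mask 0xf0 gets rx_chain_mask_shift = 4, since
+ * find_first_bit() below returns the index of the lowest set bit.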
+ */ + pdev_cap->tx_chain_mask_shift = + find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32); + pdev_cap->rx_chain_mask_shift = + find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32); + + if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2G_CAP) { + cap_band = &pdev_cap->band[NL80211_BAND_2GHZ]; + cap_band->phy_id = le32_to_cpu(mac_caps->phy_id); + cap_band->max_bw_supported = le32_to_cpu(mac_caps->max_bw_supported_2g); + cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_2g); + cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_2g); + cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_2g_ext); + cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_2g); + for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++) + cap_band->he_cap_phy_info[i] = + le32_to_cpu(mac_caps->he_cap_phy_info_2g[i]); + + cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet2g.numss_m1); + cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet2g.ru_info); + + for (i = 0; i < WMI_MAX_NUM_SS; i++) + cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] = + le32_to_cpu(mac_caps->he_ppet2g.ppet16_ppet8_ru3_ru0[i]); + } + + if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5G_CAP) { + cap_band = &pdev_cap->band[NL80211_BAND_5GHZ]; + cap_band->phy_id = le32_to_cpu(mac_caps->phy_id); + cap_band->max_bw_supported = + le32_to_cpu(mac_caps->max_bw_supported_5g); + cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g); + cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g); + cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext); + cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g); + for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++) + cap_band->he_cap_phy_info[i] = + le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]); + + cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1); + cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info); + + for (i = 0; i < WMI_MAX_NUM_SS; i++) + cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] = + le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]); + + cap_band = &pdev_cap->band[NL80211_BAND_6GHZ]; + cap_band->max_bw_supported = + le32_to_cpu(mac_caps->max_bw_supported_5g); + cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g); + cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g); + cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext); + cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g); + for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++) + cap_band->he_cap_phy_info[i] = + le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]); + + cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1); + cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info); + + for (i = 0; i < WMI_MAX_NUM_SS; i++) + cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] = + le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]); + } + + return 0; +} + +static int +ath12k_pull_reg_cap_svc_rdy_ext(struct ath12k_wmi_pdev *wmi_handle, + const struct ath12k_wmi_soc_hal_reg_caps_params *reg_caps, + const struct ath12k_wmi_hal_reg_caps_ext_params *ext_caps, + u8 phy_idx, + struct ath12k_wmi_hal_reg_capabilities_ext_arg *param) +{ + const struct ath12k_wmi_hal_reg_caps_ext_params *ext_reg_cap; + + if (!reg_caps || !ext_caps) + return -EINVAL; + + if (phy_idx >= le32_to_cpu(reg_caps->num_phy)) + return -EINVAL; + + ext_reg_cap = &ext_caps[phy_idx]; + + param->phy_id = le32_to_cpu(ext_reg_cap->phy_id); + param->eeprom_reg_domain = 
le32_to_cpu(ext_reg_cap->eeprom_reg_domain); + param->eeprom_reg_domain_ext = + le32_to_cpu(ext_reg_cap->eeprom_reg_domain_ext); + param->regcap1 = le32_to_cpu(ext_reg_cap->regcap1); + param->regcap2 = le32_to_cpu(ext_reg_cap->regcap2); + /* check if param->wireless_mode is needed */ + param->low_2ghz_chan = le32_to_cpu(ext_reg_cap->low_2ghz_chan); + param->high_2ghz_chan = le32_to_cpu(ext_reg_cap->high_2ghz_chan); + param->low_5ghz_chan = le32_to_cpu(ext_reg_cap->low_5ghz_chan); + param->high_5ghz_chan = le32_to_cpu(ext_reg_cap->high_5ghz_chan); + + return 0; +} + +static int ath12k_pull_service_ready_tlv(struct ath12k_base *ab, + const void *evt_buf, + struct ath12k_wmi_target_cap_arg *cap) +{ + const struct wmi_service_ready_event *ev = evt_buf; + + if (!ev) { + ath12k_err(ab, "%s: failed by NULL param\n", + __func__); + return -EINVAL; + } + + cap->phy_capability = le32_to_cpu(ev->phy_capability); + cap->max_frag_entry = le32_to_cpu(ev->max_frag_entry); + cap->num_rf_chains = le32_to_cpu(ev->num_rf_chains); + cap->ht_cap_info = le32_to_cpu(ev->ht_cap_info); + cap->vht_cap_info = le32_to_cpu(ev->vht_cap_info); + cap->vht_supp_mcs = le32_to_cpu(ev->vht_supp_mcs); + cap->hw_min_tx_power = le32_to_cpu(ev->hw_min_tx_power); + cap->hw_max_tx_power = le32_to_cpu(ev->hw_max_tx_power); + cap->sys_cap_info = le32_to_cpu(ev->sys_cap_info); + cap->min_pkt_size_enable = le32_to_cpu(ev->min_pkt_size_enable); + cap->max_bcn_ie_size = le32_to_cpu(ev->max_bcn_ie_size); + cap->max_num_scan_channels = le32_to_cpu(ev->max_num_scan_channels); + cap->max_supported_macs = le32_to_cpu(ev->max_supported_macs); + cap->wmi_fw_sub_feat_caps = le32_to_cpu(ev->wmi_fw_sub_feat_caps); + cap->txrx_chainmask = le32_to_cpu(ev->txrx_chainmask); + cap->default_dbs_hw_mode_index = le32_to_cpu(ev->default_dbs_hw_mode_index); + cap->num_msdu_desc = le32_to_cpu(ev->num_msdu_desc); + + return 0; +} + +/* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in + * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each + * 4-byte word. 
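+ *
+ * For example, with this layout service bit 9 is carried in word
+ * 9 / 4 = 2 of the firmware bitmap, at bit position 9 % 4 = 1; the
+ * copy routine below folds it into bit 9 of the host's linear svc_map.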
+ */ +static void ath12k_wmi_service_bitmap_copy(struct ath12k_wmi_pdev *wmi, + const u32 *wmi_svc_bm) +{ + int i, j; + + for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) { + do { + if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32)) + set_bit(j, wmi->wmi_ab->svc_map); + } while (++j % WMI_SERVICE_BITS_IN_SIZE32); + } +} + +static int ath12k_wmi_svc_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len, + const void *ptr, void *data) +{ + struct ath12k_wmi_svc_ready_parse *svc_ready = data; + struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0]; + u16 expect_len; + + switch (tag) { + case WMI_TAG_SERVICE_READY_EVENT: + if (ath12k_pull_service_ready_tlv(ab, ptr, &ab->target_caps)) + return -EINVAL; + break; + + case WMI_TAG_ARRAY_UINT32: + if (!svc_ready->wmi_svc_bitmap_done) { + expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32); + if (len < expect_len) { + ath12k_warn(ab, "invalid len %d for the tag 0x%x\n", + len, tag); + return -EINVAL; + } + + ath12k_wmi_service_bitmap_copy(wmi_handle, ptr); + + svc_ready->wmi_svc_bitmap_done = true; + } + break; + default: + break; + } + + return 0; +} + +static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *skb) +{ + struct ath12k_wmi_svc_ready_parse svc_ready = { }; + int ret; + + ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, + ath12k_wmi_svc_rdy_parse, + &svc_ready); + if (ret) { + ath12k_warn(ab, "failed to parse tlv %d\n", ret); + return ret; + } + + return 0; +} + +struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_sc, u32 len) +{ + struct sk_buff *skb; + struct ath12k_base *ab = wmi_sc->ab; + u32 round_len = roundup(len, 4); + + skb = ath12k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len); + if (!skb) + return NULL; + + skb_reserve(skb, WMI_SKB_HEADROOM); + if (!IS_ALIGNED((unsigned long)skb->data, 4)) + ath12k_warn(ab, "unaligned WMI skb data\n"); + + skb_put(skb, round_len); + memset(skb->data, 0, round_len); + + return skb; +} + +int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id, + struct sk_buff *frame) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_mgmt_send_cmd *cmd; + struct wmi_tlv *frame_tlv; + struct sk_buff *skb; + u32 buf_len; + int ret, len; + + buf_len = min_t(int, frame->len, WMI_MGMT_SEND_DOWNLD_LEN); + + len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4); + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_mgmt_send_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MGMT_TX_SEND_CMD, + sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->desc_id = cpu_to_le32(buf_id); + cmd->chanfreq = 0; + cmd->paddr_lo = cpu_to_le32(lower_32_bits(ATH12K_SKB_CB(frame)->paddr)); + cmd->paddr_hi = cpu_to_le32(upper_32_bits(ATH12K_SKB_CB(frame)->paddr)); + cmd->frame_len = cpu_to_le32(frame->len); + cmd->buf_len = cpu_to_le32(buf_len); + cmd->tx_params_valid = 0; + + frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd)); + frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len); + + memcpy(frame_tlv->value, frame->data, buf_len); + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID); + if (ret) { + ath12k_warn(ar->ab, + "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr, + struct ath12k_wmi_vdev_create_arg *args) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_vdev_create_cmd *cmd; + struct sk_buff *skb; + struct 
ath12k_wmi_vdev_txrx_streams_params *txrx_streams; + struct wmi_tlv *tlv; + int ret, len; + void *ptr; + + /* It can be optimized by sending tx/rx chain configuration + * only for supported bands instead of always sending it for + * both the bands. + */ + len = sizeof(*cmd) + TLV_HDR_SIZE + + (WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams)); + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_vdev_create_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CREATE_CMD, + sizeof(*cmd)); + + cmd->vdev_id = cpu_to_le32(args->if_id); + cmd->vdev_type = cpu_to_le32(args->type); + cmd->vdev_subtype = cpu_to_le32(args->subtype); + cmd->num_cfg_txrx_streams = cpu_to_le32(WMI_NUM_SUPPORTED_BAND_MAX); + cmd->pdev_id = cpu_to_le32(args->pdev_id); + cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id); + ether_addr_copy(cmd->vdev_macaddr.addr, macaddr); + + ptr = skb->data + sizeof(*cmd); + len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams); + + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len); + + ptr += TLV_HDR_SIZE; + txrx_streams = ptr; + len = sizeof(*txrx_streams); + txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS, + len); + txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_2G); + txrx_streams->supported_tx_streams = + cpu_to_le32(args->chains[NL80211_BAND_2GHZ].tx); + txrx_streams->supported_rx_streams = + cpu_to_le32(args->chains[NL80211_BAND_2GHZ].rx); + + txrx_streams++; + txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS, + len); + txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_5G); + txrx_streams->supported_tx_streams = + cpu_to_le32(args->chains[NL80211_BAND_5GHZ].tx); + txrx_streams->supported_rx_streams = + cpu_to_le32(args->chains[NL80211_BAND_5GHZ].rx); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n", + args->if_id, args->type, args->subtype, + macaddr, args->pdev_id); + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID); + if (ret) { + ath12k_warn(ar->ab, + "failed to submit WMI_VDEV_CREATE_CMDID\n"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_vdev_delete(struct ath12k *ar, u8 vdev_id) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_vdev_delete_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_vdev_delete_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DELETE_CMD, + sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(vdev_id); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id); + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID); + if (ret) { + ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_vdev_stop_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_vdev_stop_cmd *)skb->data; + + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_STOP_CMD, + sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(vdev_id); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev stop id 0x%x\n", vdev_id); + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID); + if (ret) { + ath12k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n"); + dev_kfree_skb(skb); + } + + return ret; 
+} + +int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_vdev_down_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_vdev_down_cmd *)skb->data; + + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DOWN_CMD, + sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(vdev_id); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev down id 0x%x\n", vdev_id); + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID); + if (ret) { + ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n"); + dev_kfree_skb(skb); + } + + return ret; +} + +static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan, + struct wmi_vdev_start_req_arg *arg) +{ + memset(chan, 0, sizeof(*chan)); + + chan->mhz = cpu_to_le32(arg->freq); + chan->band_center_freq1 = cpu_to_le32(arg->band_center_freq1); + if (arg->mode == MODE_11AC_VHT80_80) + chan->band_center_freq2 = cpu_to_le32(arg->band_center_freq2); + else + chan->band_center_freq2 = 0; + + chan->info |= le32_encode_bits(arg->mode, WMI_CHAN_INFO_MODE); + if (arg->passive) + chan->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE); + if (arg->allow_ibss) + chan->info |= cpu_to_le32(WMI_CHAN_INFO_ADHOC_ALLOWED); + if (arg->allow_ht) + chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT); + if (arg->allow_vht) + chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT); + if (arg->allow_he) + chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE); + if (arg->ht40plus) + chan->info |= cpu_to_le32(WMI_CHAN_INFO_HT40_PLUS); + if (arg->chan_radar) + chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS); + if (arg->freq2_radar) + chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS_FREQ2); + + chan->reg_info_1 = le32_encode_bits(arg->max_power, + WMI_CHAN_REG_INFO1_MAX_PWR) | + le32_encode_bits(arg->max_reg_power, + WMI_CHAN_REG_INFO1_MAX_REG_PWR); + + chan->reg_info_2 = le32_encode_bits(arg->max_antenna_gain, + WMI_CHAN_REG_INFO2_ANT_MAX) | + le32_encode_bits(arg->max_power, WMI_CHAN_REG_INFO2_MAX_TX_PWR); +} + +int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg, + bool restart) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_vdev_start_request_cmd *cmd; + struct sk_buff *skb; + struct ath12k_wmi_channel_params *chan; + struct wmi_tlv *tlv; + void *ptr; + int ret, len; + + if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid))) + return -EINVAL; + + len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_vdev_start_request_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_START_REQUEST_CMD, + sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(arg->vdev_id); + cmd->beacon_interval = cpu_to_le32(arg->bcn_intval); + cmd->bcn_tx_rate = cpu_to_le32(arg->bcn_tx_rate); + cmd->dtim_period = cpu_to_le32(arg->dtim_period); + cmd->num_noa_descriptors = cpu_to_le32(arg->num_noa_descriptors); + cmd->preferred_rx_streams = cpu_to_le32(arg->pref_rx_streams); + cmd->preferred_tx_streams = cpu_to_le32(arg->pref_tx_streams); + cmd->cac_duration_ms = cpu_to_le32(arg->cac_duration_ms); + cmd->regdomain = cpu_to_le32(arg->regdomain); + cmd->he_ops = cpu_to_le32(arg->he_ops); + + if (!restart) { + if (arg->ssid) { + cmd->ssid.ssid_len = cpu_to_le32(arg->ssid_len); + memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len); + } + if (arg->hidden_ssid) + cmd->flags |= cpu_to_le32(WMI_VDEV_START_HIDDEN_SSID); + if 
(arg->pmf_enabled) + cmd->flags |= cpu_to_le32(WMI_VDEV_START_PMF_ENABLED); + } + + cmd->flags |= cpu_to_le32(WMI_VDEV_START_LDPC_RX_ENABLED); + + ptr = skb->data + sizeof(*cmd); + chan = ptr; + + ath12k_wmi_put_wmi_channel(chan, arg); + + chan->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL, + sizeof(*chan)); + ptr += sizeof(*chan); + + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0); + + /* Note: This is a nested TLV containing: + * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv].. + */ + + ptr += sizeof(*tlv); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n", + restart ? "restart" : "start", arg->vdev_id, + arg->freq, arg->mode); + + if (restart) + ret = ath12k_wmi_cmd_send(wmi, skb, + WMI_VDEV_RESTART_REQUEST_CMDID); + else + ret = ath12k_wmi_cmd_send(wmi, skb, + WMI_VDEV_START_REQUEST_CMDID); + if (ret) { + ath12k_warn(ar->ab, "failed to submit vdev_%s cmd\n", + restart ? "restart" : "start"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid, const u8 *bssid) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_vdev_up_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_vdev_up_cmd *)skb->data; + + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_UP_CMD, + sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->vdev_assoc_id = cpu_to_le32(aid); + + ether_addr_copy(cmd->vdev_bssid.addr, bssid); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n", + vdev_id, aid, bssid); + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID); + if (ret) { + ath12k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar, + struct ath12k_wmi_peer_create_arg *arg) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_peer_create_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_peer_create_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_CREATE_CMD, + sizeof(*cmd)); + + ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_addr); + cmd->peer_type = cpu_to_le32(arg->peer_type); + cmd->vdev_id = cpu_to_le32(arg->vdev_id); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "WMI peer create vdev_id %d peer_addr %pM\n", + arg->vdev_id, arg->peer_addr); + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID); + if (ret) { + ath12k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_send_peer_delete_cmd(struct ath12k *ar, + const u8 *peer_addr, u8 vdev_id) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_peer_delete_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_peer_delete_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_DELETE_CMD, + sizeof(*cmd)); + + ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); + cmd->vdev_id = cpu_to_le32(vdev_id); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "WMI peer delete vdev_id %d peer_addr %pM\n", + vdev_id, peer_addr); + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID); + if (ret) { + ath12k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n"); + dev_kfree_skb(skb); + } + + 
return ret; +} + +int ath12k_wmi_send_pdev_set_regdomain(struct ath12k *ar, + struct ath12k_wmi_pdev_set_regdomain_arg *arg) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_pdev_set_regdomain_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_REGDOMAIN_CMD, + sizeof(*cmd)); + + cmd->reg_domain = cpu_to_le32(arg->current_rd_in_use); + cmd->reg_domain_2g = cpu_to_le32(arg->current_rd_2g); + cmd->reg_domain_5g = cpu_to_le32(arg->current_rd_5g); + cmd->conformance_test_limit_2g = cpu_to_le32(arg->ctl_2g); + cmd->conformance_test_limit_5g = cpu_to_le32(arg->ctl_5g); + cmd->dfs_domain = cpu_to_le32(arg->dfs_domain); + cmd->pdev_id = cpu_to_le32(arg->pdev_id); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n", + arg->current_rd_in_use, arg->current_rd_2g, + arg->current_rd_5g, arg->dfs_domain, arg->pdev_id); + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID); + if (ret) { + ath12k_warn(ar->ab, + "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_set_peer_param(struct ath12k *ar, const u8 *peer_addr, + u32 vdev_id, u32 param_id, u32 param_val) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_peer_set_param_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_peer_set_param_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_SET_PARAM_CMD, + sizeof(*cmd)); + ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->param_id = cpu_to_le32(param_id); + cmd->param_value = cpu_to_le32(param_val); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "WMI vdev %d peer 0x%pM set param %d value %d\n", + vdev_id, peer_addr, param_id, param_val); + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID); + if (ret) { + ath12k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_send_peer_flush_tids_cmd(struct ath12k *ar, + u8 peer_addr[ETH_ALEN], + u32 peer_tid_bitmap, + u8 vdev_id) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_peer_flush_tids_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_peer_flush_tids_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_FLUSH_TIDS_CMD, + sizeof(*cmd)); + + ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); + cmd->peer_tid_bitmap = cpu_to_le32(peer_tid_bitmap); + cmd->vdev_id = cpu_to_le32(vdev_id); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n", + vdev_id, peer_addr, peer_tid_bitmap); + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID); + if (ret) { + ath12k_warn(ar->ab, + "failed to send WMI_PEER_FLUSH_TIDS cmd\n"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_peer_rx_reorder_queue_setup(struct ath12k *ar, + int vdev_id, const u8 *addr, + dma_addr_t paddr, u8 tid, + u8 ba_window_size_valid, + u32 ba_window_size) +{ + struct wmi_peer_reorder_queue_setup_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + 
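+ /* paddr is the DMA address of the per-TID RX reorder queue; WMI + * carries addresses as 32-bit words, hence the lower_32_bits()/ + * upper_32_bits() split below, and queue_no simply mirrors the TID. + */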
cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_SETUP_CMD, + sizeof(*cmd)); + + ether_addr_copy(cmd->peer_macaddr.addr, addr); + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->tid = cpu_to_le32(tid); + cmd->queue_ptr_lo = cpu_to_le32(lower_32_bits(paddr)); + cmd->queue_ptr_hi = cpu_to_le32(upper_32_bits(paddr)); + cmd->queue_no = cpu_to_le32(tid); + cmd->ba_window_size_valid = cpu_to_le32(ba_window_size_valid); + cmd->ba_window_size = cpu_to_le32(ba_window_size); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n", + addr, vdev_id, tid); + + ret = ath12k_wmi_cmd_send(ar->wmi, skb, + WMI_PEER_REORDER_QUEUE_SETUP_CMDID); + if (ret) { + ath12k_warn(ar->ab, + "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n"); + dev_kfree_skb(skb); + } + + return ret; +} + +int +ath12k_wmi_rx_reord_queue_remove(struct ath12k *ar, + struct ath12k_wmi_rx_reorder_queue_remove_arg *arg) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_peer_reorder_queue_remove_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_REMOVE_CMD, + sizeof(*cmd)); + + ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr); + cmd->vdev_id = cpu_to_le32(arg->vdev_id); + cmd->tid_mask = cpu_to_le32(arg->peer_tid_bitmap); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "%s: peer_macaddr %pM vdev_id %d, tid_map %d", __func__, + arg->peer_macaddr, arg->vdev_id, arg->peer_tid_bitmap); + + ret = ath12k_wmi_cmd_send(wmi, skb, + WMI_PEER_REORDER_QUEUE_REMOVE_CMDID); + if (ret) { + ath12k_warn(ar->ab, + "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_pdev_set_param(struct ath12k *ar, u32 param_id, + u32 param_value, u8 pdev_id) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_pdev_set_param_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_pdev_set_param_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_PARAM_CMD, + sizeof(*cmd)); + cmd->pdev_id = cpu_to_le32(pdev_id); + cmd->param_id = cpu_to_le32(param_id); + cmd->param_value = cpu_to_le32(param_value); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "WMI pdev set param %d pdev id %d value %d\n", + param_id, pdev_id, param_value); + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID); + if (ret) { + ath12k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_pdev_set_ps_mode(struct ath12k *ar, int vdev_id, u32 enable) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_pdev_set_ps_mode_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_MODE_CMD, + sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->sta_ps_mode = cpu_to_le32(enable); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "WMI vdev set psmode %d vdev id %d\n", + enable, vdev_id); + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID); + if (ret) { + ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_MODE cmd\n"); + 
dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_pdev_suspend(struct ath12k *ar, u32 suspend_opt, + u32 pdev_id) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_pdev_suspend_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_pdev_suspend_cmd *)skb->data; + + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SUSPEND_CMD, + sizeof(*cmd)); + + cmd->suspend_opt = cpu_to_le32(suspend_opt); + cmd->pdev_id = cpu_to_le32(pdev_id); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "WMI pdev suspend pdev_id %d\n", pdev_id); + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID); + if (ret) { + ath12k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_pdev_resume(struct ath12k *ar, u32 pdev_id) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_pdev_resume_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_pdev_resume_cmd *)skb->data; + + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_RESUME_CMD, + sizeof(*cmd)); + cmd->pdev_id = cpu_to_le32(pdev_id); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "WMI pdev resume pdev id %d\n", pdev_id); + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID); + if (ret) { + ath12k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n"); + dev_kfree_skb(skb); + } + + return ret; +} + +/* TODO FW Support for the cmd is not available yet. + * Can be tested once the command and corresponding + * event is implemented in FW + */ +int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar, + enum wmi_bss_chan_info_req_type type) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_pdev_bss_chan_info_req_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data; + + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST, + sizeof(*cmd)); + cmd->req_type = cpu_to_le32(type); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "WMI bss chan info req type %d\n", type); + + ret = ath12k_wmi_cmd_send(wmi, skb, + WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID); + if (ret) { + ath12k_warn(ar->ab, + "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_send_set_ap_ps_param_cmd(struct ath12k *ar, u8 *peer_addr, + struct ath12k_wmi_ap_ps_arg *arg) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_ap_ps_peer_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_ap_ps_peer_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_AP_PS_PEER_CMD, + sizeof(*cmd)); + + cmd->vdev_id = cpu_to_le32(arg->vdev_id); + ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); + cmd->param = cpu_to_le32(arg->param); + cmd->value = cpu_to_le32(arg->value); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "WMI set ap ps vdev id %d peer %pM param %d value %d\n", + arg->vdev_id, peer_addr, arg->param, arg->value); + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID); + if (ret) { + ath12k_warn(ar->ab, + "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_set_sta_ps_param(struct ath12k *ar, u32 vdev_id, + u32 param, 
u32 param_value) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_sta_powersave_param_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_sta_powersave_param_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_PARAM_CMD, + sizeof(*cmd)); + + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->param = cpu_to_le32(param); + cmd->value = cpu_to_le32(param_value); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "WMI set sta ps vdev_id %d param %d value %d\n", + vdev_id, param, param_value); + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID); + if (ret) { + ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_force_fw_hang_cmd(struct ath12k *ar, u32 type, u32 delay_time_ms) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_force_fw_hang_cmd *cmd; + struct sk_buff *skb; + int ret, len; + + len = sizeof(*cmd); + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_force_fw_hang_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FORCE_FW_HANG_CMD, + len); + + cmd->type = cpu_to_le32(type); + cmd->delay_time_ms = cpu_to_le32(delay_time_ms); + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID); + + if (ret) { + ath12k_warn(ar->ab, "Failed to send WMI_FORCE_FW_HANG_CMDID"); + dev_kfree_skb(skb); + } + return ret; +} + +int ath12k_wmi_vdev_set_param_cmd(struct ath12k *ar, u32 vdev_id, + u32 param_id, u32 param_value) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_vdev_set_param_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_vdev_set_param_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_PARAM_CMD, + sizeof(*cmd)); + + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->param_id = cpu_to_le32(param_id); + cmd->param_value = cpu_to_le32(param_value); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "WMI vdev id 0x%x set param %d value %d\n", + vdev_id, param_id, param_value); + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID); + if (ret) { + ath12k_warn(ar->ab, + "failed to send WMI_VDEV_SET_PARAM_CMDID\n"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_send_pdev_temperature_cmd(struct ath12k *ar) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_get_pdev_temperature_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_GET_TEMPERATURE_CMD, + sizeof(*cmd)); + cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "WMI pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id); + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID); + if (ret) { + ath12k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar, + u32 vdev_id, u32 bcn_ctrl_op) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_bcn_offload_ctrl_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct 
wmi_bcn_offload_ctrl_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_OFFLOAD_CTRL_CMD, + sizeof(*cmd)); + + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->bcn_ctrl_op = cpu_to_le32(bcn_ctrl_op); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "WMI bcn ctrl offload vdev id %d ctrl_op %d\n", + vdev_id, bcn_ctrl_op); + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID); + if (ret) { + ath12k_warn(ar->ab, + "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id, + struct ieee80211_mutable_offsets *offs, + struct sk_buff *bcn) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_bcn_tmpl_cmd *cmd; + struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info; + struct wmi_tlv *tlv; + struct sk_buff *skb; + void *ptr; + int ret, len; + size_t aligned_len = roundup(bcn->len, 4); + + len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_bcn_tmpl_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_TMPL_CMD, + sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->tim_ie_offset = cpu_to_le32(offs->tim_offset); + cmd->csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[0]); + cmd->ext_csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[1]); + cmd->buf_len = cpu_to_le32(bcn->len); + + ptr = skb->data + sizeof(*cmd); + + bcn_prb_info = ptr; + len = sizeof(*bcn_prb_info); + bcn_prb_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO, + len); + bcn_prb_info->caps = 0; + bcn_prb_info->erp = 0; + + ptr += sizeof(*bcn_prb_info); + + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len); + memcpy(tlv->value, bcn->data, bcn->len); + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID); + if (ret) { + ath12k_warn(ar->ab, "failed to send WMI_BCN_TMPL_CMDID\n"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_vdev_install_key(struct ath12k *ar, + struct wmi_vdev_install_key_arg *arg) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_vdev_install_key_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + int ret, len, key_len_aligned; + + /* WMI_TAG_ARRAY_BYTE needs to be aligned with 4, the actual key + * length is specified in cmd->key_len. 
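+ * (e.g. a 13 byte key is carried in a 16 byte TLV payload). The WMI + * skb is zeroed at allocation time, so the bytes between key_len and + * key_len_aligned go out as zero padding.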
+ */ + key_len_aligned = roundup(arg->key_len, 4); + + len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_vdev_install_key_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_INSTALL_KEY_CMD, + sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(arg->vdev_id); + ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr); + cmd->key_idx = cpu_to_le32(arg->key_idx); + cmd->key_flags = cpu_to_le32(arg->key_flags); + cmd->key_cipher = cpu_to_le32(arg->key_cipher); + cmd->key_len = cpu_to_le32(arg->key_len); + cmd->key_txmic_len = cpu_to_le32(arg->key_txmic_len); + cmd->key_rxmic_len = cpu_to_le32(arg->key_rxmic_len); + + if (arg->key_rsc_counter) + cmd->key_rsc_counter = cpu_to_le64(arg->key_rsc_counter); + + tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd)); + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, key_len_aligned); + memcpy(tlv->value, arg->key_data, arg->key_len); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "WMI vdev install key idx %d cipher %d len %d\n", + arg->key_idx, arg->key_cipher, arg->key_len); + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID); + if (ret) { + ath12k_warn(ar->ab, + "failed to send WMI_VDEV_INSTALL_KEY cmd\n"); + dev_kfree_skb(skb); + } + + return ret; +} + +static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd, + struct ath12k_wmi_peer_assoc_arg *arg, + bool hw_crypto_disabled) +{ + cmd->peer_flags = 0; + + if (arg->is_wme_set) { + if (arg->qos_flag) + cmd->peer_flags |= cpu_to_le32(WMI_PEER_QOS); + if (arg->apsd_flag) + cmd->peer_flags |= cpu_to_le32(WMI_PEER_APSD); + if (arg->ht_flag) + cmd->peer_flags |= cpu_to_le32(WMI_PEER_HT); + if (arg->bw_40) + cmd->peer_flags |= cpu_to_le32(WMI_PEER_40MHZ); + if (arg->bw_80) + cmd->peer_flags |= cpu_to_le32(WMI_PEER_80MHZ); + if (arg->bw_160) + cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ); + + /* Typically if STBC is enabled for VHT it should be enabled + * for HT as well + **/ + if (arg->stbc_flag) + cmd->peer_flags |= cpu_to_le32(WMI_PEER_STBC); + + /* Typically if LDPC is enabled for VHT it should be enabled + * for HT as well + **/ + if (arg->ldpc_flag) + cmd->peer_flags |= cpu_to_le32(WMI_PEER_LDPC); + + if (arg->static_mimops_flag) + cmd->peer_flags |= cpu_to_le32(WMI_PEER_STATIC_MIMOPS); + if (arg->dynamic_mimops_flag) + cmd->peer_flags |= cpu_to_le32(WMI_PEER_DYN_MIMOPS); + if (arg->spatial_mux_flag) + cmd->peer_flags |= cpu_to_le32(WMI_PEER_SPATIAL_MUX); + if (arg->vht_flag) + cmd->peer_flags |= cpu_to_le32(WMI_PEER_VHT); + if (arg->he_flag) + cmd->peer_flags |= cpu_to_le32(WMI_PEER_HE); + if (arg->twt_requester) + cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_REQ); + if (arg->twt_responder) + cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_RESP); + } + + /* Suppress authorization for all AUTH modes that need 4-way handshake + * (during re-association). + * Authorization will be done for these modes on key installation. 
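+ * (i.e. need_ptk_4_way below clears WMI_PEER_AUTH again unless HW + * crypto is disabled, and the peer is marked authorized only once + * the 4-way handshake has completed and the keys are installed).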
+ */ + if (arg->auth_flag) + cmd->peer_flags |= cpu_to_le32(WMI_PEER_AUTH); + if (arg->need_ptk_4_way) { + cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_PTK_4_WAY); + if (!hw_crypto_disabled) + cmd->peer_flags &= cpu_to_le32(~WMI_PEER_AUTH); + } + if (arg->need_gtk_2_way) + cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_GTK_2_WAY); + /* safe mode bypass the 4-way handshake */ + if (arg->safe_mode_enabled) + cmd->peer_flags &= cpu_to_le32(~(WMI_PEER_NEED_PTK_4_WAY | + WMI_PEER_NEED_GTK_2_WAY)); + + if (arg->is_pmf_enabled) + cmd->peer_flags |= cpu_to_le32(WMI_PEER_PMF); + + /* Disable AMSDU for station transmit, if user configures it */ + /* Disable AMSDU for AP transmit to 11n Stations, if user configures + * it + * if (arg->amsdu_disable) Add after FW support + **/ + + /* Target asserts if node is marked HT and all MCS is set to 0. + * Mark the node as non-HT if all the mcs rates are disabled through + * iwpriv + **/ + if (arg->peer_ht_rates.num_rates == 0) + cmd->peer_flags &= cpu_to_le32(~WMI_PEER_HT); +} + +int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar, + struct ath12k_wmi_peer_assoc_arg *arg) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_peer_assoc_complete_cmd *cmd; + struct ath12k_wmi_vht_rate_set_params *mcs; + struct ath12k_wmi_he_rate_set_params *he_mcs; + struct sk_buff *skb; + struct wmi_tlv *tlv; + void *ptr; + u32 peer_legacy_rates_align; + u32 peer_ht_rates_align; + int i, ret, len; + + peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates, + sizeof(u32)); + peer_ht_rates_align = roundup(arg->peer_ht_rates.num_rates, + sizeof(u32)); + + len = sizeof(*cmd) + + TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) + + TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) + + sizeof(*mcs) + TLV_HDR_SIZE + + (sizeof(*he_mcs) * arg->peer_he_mcs_count); + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + ptr = skb->data; + + cmd = ptr; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_ASSOC_COMPLETE_CMD, + sizeof(*cmd)); + + cmd->vdev_id = cpu_to_le32(arg->vdev_id); + + cmd->peer_new_assoc = cpu_to_le32(arg->peer_new_assoc); + cmd->peer_associd = cpu_to_le32(arg->peer_associd); + + ath12k_wmi_copy_peer_flags(cmd, arg, + test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, + &ar->ab->dev_flags)); + + ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_mac); + + cmd->peer_rate_caps = cpu_to_le32(arg->peer_rate_caps); + cmd->peer_caps = cpu_to_le32(arg->peer_caps); + cmd->peer_listen_intval = cpu_to_le32(arg->peer_listen_intval); + cmd->peer_ht_caps = cpu_to_le32(arg->peer_ht_caps); + cmd->peer_max_mpdu = cpu_to_le32(arg->peer_max_mpdu); + cmd->peer_mpdu_density = cpu_to_le32(arg->peer_mpdu_density); + cmd->peer_vht_caps = cpu_to_le32(arg->peer_vht_caps); + cmd->peer_phymode = cpu_to_le32(arg->peer_phymode); + + /* Update 11ax capabilities */ + cmd->peer_he_cap_info = cpu_to_le32(arg->peer_he_cap_macinfo[0]); + cmd->peer_he_cap_info_ext = cpu_to_le32(arg->peer_he_cap_macinfo[1]); + cmd->peer_he_cap_info_internal = cpu_to_le32(arg->peer_he_cap_macinfo_internal); + cmd->peer_he_caps_6ghz = cpu_to_le32(arg->peer_he_caps_6ghz); + cmd->peer_he_ops = cpu_to_le32(arg->peer_he_ops); + for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++) + cmd->peer_he_cap_phy[i] = + cpu_to_le32(arg->peer_he_cap_phyinfo[i]); + cmd->peer_ppet.numss_m1 = cpu_to_le32(arg->peer_ppet.numss_m1); + cmd->peer_ppet.ru_info = cpu_to_le32(arg->peer_ppet.ru_bit_mask); + for (i = 0; i < WMI_MAX_NUM_SS; i++) + cmd->peer_ppet.ppet16_ppet8_ru3_ru0[i] = + 
cpu_to_le32(arg->peer_ppet.ppet16_ppet8_ru3_ru0[i]); + + /* Update peer legacy rate information */ + ptr += sizeof(*cmd); + + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_legacy_rates_align); + + ptr += TLV_HDR_SIZE; + + cmd->num_peer_legacy_rates = cpu_to_le32(arg->peer_legacy_rates.num_rates); + memcpy(ptr, arg->peer_legacy_rates.rates, + arg->peer_legacy_rates.num_rates); + + /* Update peer HT rate information */ + ptr += peer_legacy_rates_align; + + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_ht_rates_align); + ptr += TLV_HDR_SIZE; + cmd->num_peer_ht_rates = cpu_to_le32(arg->peer_ht_rates.num_rates); + memcpy(ptr, arg->peer_ht_rates.rates, + arg->peer_ht_rates.num_rates); + + /* VHT Rates */ + ptr += peer_ht_rates_align; + + mcs = ptr; + + mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VHT_RATE_SET, + sizeof(*mcs)); + + cmd->peer_nss = cpu_to_le32(arg->peer_nss); + + /* Update bandwidth-NSS mapping */ + cmd->peer_bw_rxnss_override = 0; + cmd->peer_bw_rxnss_override |= cpu_to_le32(arg->peer_bw_rxnss_override); + + if (arg->vht_capable) { + mcs->rx_max_rate = cpu_to_le32(arg->rx_max_rate); + mcs->rx_mcs_set = cpu_to_le32(arg->rx_mcs_set); + mcs->tx_max_rate = cpu_to_le32(arg->tx_max_rate); + mcs->tx_mcs_set = cpu_to_le32(arg->tx_mcs_set); + } + + /* HE Rates */ + cmd->peer_he_mcs = cpu_to_le32(arg->peer_he_mcs_count); + cmd->min_data_rate = cpu_to_le32(arg->min_data_rate); + + ptr += sizeof(*mcs); + + len = arg->peer_he_mcs_count * sizeof(*he_mcs); + + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len); + ptr += TLV_HDR_SIZE; + + /* Loop through the HE rate set */ + for (i = 0; i < arg->peer_he_mcs_count; i++) { + he_mcs = ptr; + he_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET, + sizeof(*he_mcs)); + + he_mcs->rx_mcs_set = cpu_to_le32(arg->peer_he_rx_mcs_set[i]); + he_mcs->tx_mcs_set = cpu_to_le32(arg->peer_he_tx_mcs_set[i]); + ptr += sizeof(*he_mcs); + } + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x\n", + cmd->vdev_id, cmd->peer_associd, arg->peer_mac, + cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps, + cmd->peer_listen_intval, cmd->peer_ht_caps, + cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode, + cmd->peer_mpdu_density, + cmd->peer_vht_caps, cmd->peer_he_cap_info, + cmd->peer_he_ops, cmd->peer_he_cap_info_ext, + cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1], + cmd->peer_he_cap_phy[2], + cmd->peer_bw_rxnss_override); + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID); + if (ret) { + ath12k_warn(ar->ab, + "failed to send WMI_PEER_ASSOC_CMDID\n"); + dev_kfree_skb(skb); + } + + return ret; +} + +void ath12k_wmi_start_scan_init(struct ath12k *ar, + struct ath12k_wmi_scan_req_arg *arg) +{ + /* setup commonly used values */ + arg->scan_req_id = 1; + arg->scan_priority = WMI_SCAN_PRIORITY_LOW; + arg->dwell_time_active = 50; + arg->dwell_time_active_2g = 0; + arg->dwell_time_passive = 150; + arg->dwell_time_active_6g = 40; + arg->dwell_time_passive_6g = 30; + arg->min_rest_time = 50; + arg->max_rest_time = 500; + arg->repeat_probe_time = 0; + arg->probe_spacing_time = 0; + arg->idle_time = 0; + arg->max_scan_time = 20000; + arg->probe_delay = 5; + arg->notify_scan_events = WMI_SCAN_EVENT_STARTED | + 
WMI_SCAN_EVENT_COMPLETED | + WMI_SCAN_EVENT_BSS_CHANNEL | + WMI_SCAN_EVENT_FOREIGN_CHAN | + WMI_SCAN_EVENT_DEQUEUED; + arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT; + arg->num_bssid = 1; + + /* fill bssid_list[0] with 0xff, otherwise bssid and RA will be + * ZEROs in probe request + */ + eth_broadcast_addr(arg->bssid_list[0].addr); +} + +static void ath12k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd, + struct ath12k_wmi_scan_req_arg *arg) +{ + /* Scan events subscription */ + if (arg->scan_ev_started) + cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_STARTED); + if (arg->scan_ev_completed) + cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_COMPLETED); + if (arg->scan_ev_bss_chan) + cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_BSS_CHANNEL); + if (arg->scan_ev_foreign_chan) + cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN); + if (arg->scan_ev_dequeued) + cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_DEQUEUED); + if (arg->scan_ev_preempted) + cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_PREEMPTED); + if (arg->scan_ev_start_failed) + cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_START_FAILED); + if (arg->scan_ev_restarted) + cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESTARTED); + if (arg->scan_ev_foreign_chn_exit) + cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT); + if (arg->scan_ev_suspended) + cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_SUSPENDED); + if (arg->scan_ev_resumed) + cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESUMED); + + /** Set scan control flags */ + cmd->scan_ctrl_flags = 0; + if (arg->scan_f_passive) + cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_PASSIVE); + if (arg->scan_f_strict_passive_pch) + cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN); + if (arg->scan_f_promisc_mode) + cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROMISCUOS); + if (arg->scan_f_capture_phy_err) + cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CAPTURE_PHY_ERROR); + if (arg->scan_f_half_rate) + cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_HALF_RATE_SUPPORT); + if (arg->scan_f_quarter_rate) + cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT); + if (arg->scan_f_cck_rates) + cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_CCK_RATES); + if (arg->scan_f_ofdm_rates) + cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_OFDM_RATES); + if (arg->scan_f_chan_stat_evnt) + cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CHAN_STAT_EVENT); + if (arg->scan_f_filter_prb_req) + cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ); + if (arg->scan_f_bcast_probe) + cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_BCAST_PROBE_REQ); + if (arg->scan_f_offchan_mgmt_tx) + cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_MGMT_TX); + if (arg->scan_f_offchan_data_tx) + cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_DATA_TX); + if (arg->scan_f_force_active_dfs_chn) + cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS); + if (arg->scan_f_add_tpc_ie_in_probe) + cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ); + if (arg->scan_f_add_ds_ie_in_probe) + cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ); + if (arg->scan_f_add_spoofed_mac_in_probe) + cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ); + if (arg->scan_f_add_rand_seq_in_probe) + cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ); + if 
(arg->scan_f_en_ie_whitelist_in_probe) + cmd->scan_ctrl_flags |= + cpu_to_le32(WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ); + + cmd->scan_ctrl_flags |= le32_encode_bits(arg->adaptive_dwell_time_mode, + WMI_SCAN_DWELL_MODE_MASK); +} + +int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar, + struct ath12k_wmi_scan_req_arg *arg) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_start_scan_cmd *cmd; + struct ath12k_wmi_ssid_params *ssid = NULL; + struct ath12k_wmi_mac_addr_params *bssid; + struct sk_buff *skb; + struct wmi_tlv *tlv; + void *ptr; + int i, ret, len; + u32 *tmp_ptr; + u8 extraie_len_with_pad = 0; + struct ath12k_wmi_hint_short_ssid_arg *s_ssid = NULL; + struct ath12k_wmi_hint_bssid_arg *hint_bssid = NULL; + + len = sizeof(*cmd); + + len += TLV_HDR_SIZE; + if (arg->num_chan) + len += arg->num_chan * sizeof(u32); + + len += TLV_HDR_SIZE; + if (arg->num_ssids) + len += arg->num_ssids * sizeof(*ssid); + + len += TLV_HDR_SIZE; + if (arg->num_bssid) + len += sizeof(*bssid) * arg->num_bssid; + + len += TLV_HDR_SIZE; + if (arg->extraie.len) + extraie_len_with_pad = + roundup(arg->extraie.len, sizeof(u32)); + len += extraie_len_with_pad; + + if (arg->num_hint_bssid) + len += TLV_HDR_SIZE + + arg->num_hint_bssid * sizeof(*hint_bssid); + + if (arg->num_hint_s_ssid) + len += TLV_HDR_SIZE + + arg->num_hint_s_ssid * sizeof(*s_ssid); + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + ptr = skb->data; + + cmd = ptr; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_START_SCAN_CMD, + sizeof(*cmd)); + + cmd->scan_id = cpu_to_le32(arg->scan_id); + cmd->scan_req_id = cpu_to_le32(arg->scan_req_id); + cmd->vdev_id = cpu_to_le32(arg->vdev_id); + cmd->scan_priority = cpu_to_le32(arg->scan_priority); + cmd->notify_scan_events = cpu_to_le32(arg->notify_scan_events); + + ath12k_wmi_copy_scan_event_cntrl_flags(cmd, arg); + + cmd->dwell_time_active = cpu_to_le32(arg->dwell_time_active); + cmd->dwell_time_active_2g = cpu_to_le32(arg->dwell_time_active_2g); + cmd->dwell_time_passive = cpu_to_le32(arg->dwell_time_passive); + cmd->dwell_time_active_6g = cpu_to_le32(arg->dwell_time_active_6g); + cmd->dwell_time_passive_6g = cpu_to_le32(arg->dwell_time_passive_6g); + cmd->min_rest_time = cpu_to_le32(arg->min_rest_time); + cmd->max_rest_time = cpu_to_le32(arg->max_rest_time); + cmd->repeat_probe_time = cpu_to_le32(arg->repeat_probe_time); + cmd->probe_spacing_time = cpu_to_le32(arg->probe_spacing_time); + cmd->idle_time = cpu_to_le32(arg->idle_time); + cmd->max_scan_time = cpu_to_le32(arg->max_scan_time); + cmd->probe_delay = cpu_to_le32(arg->probe_delay); + cmd->burst_duration = cpu_to_le32(arg->burst_duration); + cmd->num_chan = cpu_to_le32(arg->num_chan); + cmd->num_bssid = cpu_to_le32(arg->num_bssid); + cmd->num_ssids = cpu_to_le32(arg->num_ssids); + cmd->ie_len = cpu_to_le32(arg->extraie.len); + cmd->n_probes = cpu_to_le32(arg->n_probes); + + ptr += sizeof(*cmd); + + len = arg->num_chan * sizeof(u32); + + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, len); + ptr += TLV_HDR_SIZE; + tmp_ptr = (u32 *)ptr; + + memcpy(tmp_ptr, arg->chan_list, arg->num_chan * 4); + + ptr += len; + + len = arg->num_ssids * sizeof(*ssid); + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len); + + ptr += TLV_HDR_SIZE; + + if (arg->num_ssids) { + ssid = ptr; + for (i = 0; i < arg->num_ssids; ++i) { + ssid->ssid_len = cpu_to_le32(arg->ssid[i].ssid_len); + memcpy(ssid->ssid, arg->ssid[i].ssid, + arg->ssid[i].ssid_len); + ssid++; + } + } + + ptr += 
(arg->num_ssids * sizeof(*ssid)); + len = arg->num_bssid * sizeof(*bssid); + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len); + + ptr += TLV_HDR_SIZE; + bssid = ptr; + + if (arg->num_bssid) { + for (i = 0; i < arg->num_bssid; ++i) { + ether_addr_copy(bssid->addr, + arg->bssid_list[i].addr); + bssid++; + } + } + + ptr += arg->num_bssid * sizeof(*bssid); + + len = extraie_len_with_pad; + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len); + ptr += TLV_HDR_SIZE; + + if (arg->extraie.len) + memcpy(ptr, arg->extraie.ptr, + arg->extraie.len); + + ptr += extraie_len_with_pad; + + if (arg->num_hint_s_ssid) { + len = arg->num_hint_s_ssid * sizeof(*s_ssid); + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len); + ptr += TLV_HDR_SIZE; + s_ssid = ptr; + for (i = 0; i < arg->num_hint_s_ssid; ++i) { + s_ssid->freq_flags = arg->hint_s_ssid[i].freq_flags; + s_ssid->short_ssid = arg->hint_s_ssid[i].short_ssid; + s_ssid++; + } + ptr += len; + } + + if (arg->num_hint_bssid) { + len = arg->num_hint_bssid * sizeof(struct ath12k_wmi_hint_bssid_arg); + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len); + ptr += TLV_HDR_SIZE; + hint_bssid = ptr; + for (i = 0; i < arg->num_hint_bssid; ++i) { + hint_bssid->freq_flags = + arg->hint_bssid[i].freq_flags; + ether_addr_copy(&hint_bssid->bssid.addr[0], + &arg->hint_bssid[i].bssid.addr[0]); + hint_bssid++; + } + } + + ret = ath12k_wmi_cmd_send(wmi, skb, + WMI_START_SCAN_CMDID); + if (ret) { + ath12k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_send_scan_stop_cmd(struct ath12k *ar, + struct ath12k_wmi_scan_cancel_arg *arg) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_stop_scan_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_stop_scan_cmd *)skb->data; + + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STOP_SCAN_CMD, + sizeof(*cmd)); + + cmd->vdev_id = cpu_to_le32(arg->vdev_id); + cmd->requestor = cpu_to_le32(arg->requester); + cmd->scan_id = cpu_to_le32(arg->scan_id); + cmd->pdev_id = cpu_to_le32(arg->pdev_id); + /* stop the scan with the corresponding scan_id */ + if (arg->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) { + /* Cancelling all scans */ + cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ALL); + } else if (arg->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) { + /* Cancelling VAP scans */ + cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_VAP_ALL); + } else if (arg->req_type == WLAN_SCAN_CANCEL_SINGLE) { + /* Cancelling specific scan */ + cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ONE); + } else { + ath12k_warn(ar->ab, "invalid scan cancel req_type %d", + arg->req_type); + dev_kfree_skb(skb); + return -EINVAL; + } + + ret = ath12k_wmi_cmd_send(wmi, skb, + WMI_STOP_SCAN_CMDID); + if (ret) { + ath12k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_send_scan_chan_list_cmd(struct ath12k *ar, + struct ath12k_wmi_scan_chan_list_arg *arg) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_scan_chan_list_cmd *cmd; + struct sk_buff *skb; + struct ath12k_wmi_channel_params *chan_info; + struct ath12k_wmi_channel_arg *channel_arg; + struct wmi_tlv *tlv; + void *ptr; + int i, ret, len; + u16 num_send_chans, num_sends = 0, max_chan_limit = 0; + __le32 *reg1, *reg2; + + channel_arg = &arg->channel[0]; + while (arg->nallchans) { + len = sizeof(*cmd) + 
TLV_HDR_SIZE; + max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) / + sizeof(*chan_info); + + num_send_chans = min(arg->nallchans, max_chan_limit); + + arg->nallchans -= num_send_chans; + len += sizeof(*chan_info) * num_send_chans; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_scan_chan_list_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SCAN_CHAN_LIST_CMD, + sizeof(*cmd)); + cmd->pdev_id = cpu_to_le32(arg->pdev_id); + cmd->num_scan_chans = cpu_to_le32(num_send_chans); + if (num_sends) + cmd->flags |= cpu_to_le32(WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "WMI no.of chan = %d len = %d pdev_id = %d num_sends = %d\n", + num_send_chans, len, cmd->pdev_id, num_sends); + + ptr = skb->data + sizeof(*cmd); + + len = sizeof(*chan_info) * num_send_chans; + tlv = ptr; + tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_STRUCT, + len); + ptr += TLV_HDR_SIZE; + + for (i = 0; i < num_send_chans; ++i) { + chan_info = ptr; + memset(chan_info, 0, sizeof(*chan_info)); + len = sizeof(*chan_info); + chan_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL, + len); + + reg1 = &chan_info->reg_info_1; + reg2 = &chan_info->reg_info_2; + chan_info->mhz = cpu_to_le32(channel_arg->mhz); + chan_info->band_center_freq1 = cpu_to_le32(channel_arg->cfreq1); + chan_info->band_center_freq2 = cpu_to_le32(channel_arg->cfreq2); + + if (channel_arg->is_chan_passive) + chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE); + if (channel_arg->allow_he) + chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE); + else if (channel_arg->allow_vht) + chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT); + else if (channel_arg->allow_ht) + chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT); + if (channel_arg->half_rate) + chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_HALF_RATE); + if (channel_arg->quarter_rate) + chan_info->info |= + cpu_to_le32(WMI_CHAN_INFO_QUARTER_RATE); + + if (channel_arg->psc_channel) + chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PSC); + + chan_info->info |= le32_encode_bits(channel_arg->phy_mode, + WMI_CHAN_INFO_MODE); + *reg1 |= le32_encode_bits(channel_arg->minpower, + WMI_CHAN_REG_INFO1_MIN_PWR); + *reg1 |= le32_encode_bits(channel_arg->maxpower, + WMI_CHAN_REG_INFO1_MAX_PWR); + *reg1 |= le32_encode_bits(channel_arg->maxregpower, + WMI_CHAN_REG_INFO1_MAX_REG_PWR); + *reg1 |= le32_encode_bits(channel_arg->reg_class_id, + WMI_CHAN_REG_INFO1_REG_CLS); + *reg2 |= le32_encode_bits(channel_arg->antennamax, + WMI_CHAN_REG_INFO2_ANT_MAX); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n", + i, chan_info->mhz, chan_info->info); + + ptr += sizeof(*chan_info); + + channel_arg++; + } + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID); + if (ret) { + ath12k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n"); + dev_kfree_skb(skb); + return ret; + } + + num_sends++; + } + + return 0; +} + +int ath12k_wmi_send_wmm_update_cmd(struct ath12k *ar, u32 vdev_id, + struct wmi_wmm_params_all_arg *param) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_vdev_set_wmm_params_cmd *cmd; + struct wmi_wmm_params *wmm_param; + struct wmi_wmm_params_arg *wmi_wmm_arg; + struct sk_buff *skb; + int ret, ac; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD, 
+ sizeof(*cmd)); + + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->wmm_param_type = 0; + + for (ac = 0; ac < WME_NUM_AC; ac++) { + switch (ac) { + case WME_AC_BE: + wmi_wmm_arg = &param->ac_be; + break; + case WME_AC_BK: + wmi_wmm_arg = &param->ac_bk; + break; + case WME_AC_VI: + wmi_wmm_arg = &param->ac_vi; + break; + case WME_AC_VO: + wmi_wmm_arg = &param->ac_vo; + break; + } + + wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac]; + wmm_param->tlv_header = + ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD, + sizeof(*wmm_param)); + + wmm_param->aifs = cpu_to_le32(wmi_wmm_arg->aifs); + wmm_param->cwmin = cpu_to_le32(wmi_wmm_arg->cwmin); + wmm_param->cwmax = cpu_to_le32(wmi_wmm_arg->cwmax); + wmm_param->txoplimit = cpu_to_le32(wmi_wmm_arg->txop); + wmm_param->acm = cpu_to_le32(wmi_wmm_arg->acm); + wmm_param->no_ack = cpu_to_le32(wmi_wmm_arg->no_ack); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n", + ac, wmm_param->aifs, wmm_param->cwmin, + wmm_param->cwmax, wmm_param->txoplimit, + wmm_param->acm, wmm_param->no_ack); + } + ret = ath12k_wmi_cmd_send(wmi, skb, + WMI_VDEV_SET_WMM_PARAMS_CMDID); + if (ret) { + ath12k_warn(ar->ab, + "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath12k *ar, + u32 pdev_id) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_dfs_phyerr_offload_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data; + cmd->tlv_header = + ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD, + sizeof(*cmd)); + + cmd->pdev_id = cpu_to_le32(pdev_id); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "WMI dfs phy err offload enable pdev id %d\n", pdev_id); + + ret = ath12k_wmi_cmd_send(wmi, skb, + WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID); + if (ret) { + ath12k_warn(ar->ab, + "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_delba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac, + u32 tid, u32 initiator, u32 reason) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_delba_send_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_delba_send_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DELBA_SEND_CMD, + sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(vdev_id); + ether_addr_copy(cmd->peer_macaddr.addr, mac); + cmd->tid = cpu_to_le32(tid); + cmd->initiator = cpu_to_le32(initiator); + cmd->reasoncode = cpu_to_le32(reason); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n", + vdev_id, mac, tid, initiator, reason); + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID); + + if (ret) { + ath12k_warn(ar->ab, + "failed to send WMI_DELBA_SEND_CMDID cmd\n"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_addba_set_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac, + u32 tid, u32 status) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_addba_setresponse_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_addba_setresponse_cmd *)skb->data; + cmd->tlv_header = + 
ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SETRESPONSE_CMD, + sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(vdev_id); + ether_addr_copy(cmd->peer_macaddr.addr, mac); + cmd->tid = cpu_to_le32(tid); + cmd->statuscode = cpu_to_le32(status); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n", + vdev_id, mac, tid, status); + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID); + + if (ret) { + ath12k_warn(ar->ab, + "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_addba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac, + u32 tid, u32 buf_size) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_addba_send_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_addba_send_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SEND_CMD, + sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(vdev_id); + ether_addr_copy(cmd->peer_macaddr.addr, mac); + cmd->tid = cpu_to_le32(tid); + cmd->buffersize = cpu_to_le32(buf_size); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n", + vdev_id, mac, tid, buf_size); + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID); + + if (ret) { + ath12k_warn(ar->ab, + "failed to send WMI_ADDBA_SEND_CMDID cmd\n"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_addba_clear_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_addba_clear_resp_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_addba_clear_resp_cmd *)skb->data; + cmd->tlv_header = + ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_CLEAR_RESP_CMD, + sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(vdev_id); + ether_addr_copy(cmd->peer_macaddr.addr, mac); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n", + vdev_id, mac); + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID); + + if (ret) { + ath12k_warn(ar->ab, + "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_send_init_country_cmd(struct ath12k *ar, + struct ath12k_wmi_init_country_arg *arg) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_init_country_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_init_country_cmd *)skb->data; + cmd->tlv_header = + ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_INIT_COUNTRY_CMD, + sizeof(*cmd)); + + cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id); + + switch (arg->flags) { + case ALPHA_IS_SET: + cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_ALPHA); + memcpy(&cmd->cc_info.alpha2, arg->cc_info.alpha2, 3); + break; + case CC_IS_SET: + cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE); + cmd->cc_info.country_code = + cpu_to_le32(arg->cc_info.country_code); + break; + case REGDMN_IS_SET: + cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_REGDOMAIN); + cmd->cc_info.regdom_id = cpu_to_le32(arg->cc_info.regdom_id); + break; + default: + ret = -EINVAL; + goto out; + } + + ret = ath12k_wmi_cmd_send(wmi, skb, + WMI_SET_INIT_COUNTRY_CMDID); + +out: + if (ret) { + ath12k_warn(ar->ab, + "failed to send WMI_SET_INIT_COUNTRY CMD :%d\n", + ret); + 
dev_kfree_skb(skb); + } + + return ret; +} + +int +ath12k_wmi_send_twt_enable_cmd(struct ath12k *ar, u32 pdev_id) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct ath12k_base *ab = wmi->wmi_ab->ab; + struct wmi_twt_enable_params_cmd *cmd; + struct sk_buff *skb; + int ret, len; + + len = sizeof(*cmd); + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_twt_enable_params_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_ENABLE_CMD, + len); + cmd->pdev_id = cpu_to_le32(pdev_id); + cmd->sta_cong_timer_ms = cpu_to_le32(ATH12K_TWT_DEF_STA_CONG_TIMER_MS); + cmd->default_slot_size = cpu_to_le32(ATH12K_TWT_DEF_DEFAULT_SLOT_SIZE); + cmd->congestion_thresh_setup = + cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_SETUP); + cmd->congestion_thresh_teardown = + cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_TEARDOWN); + cmd->congestion_thresh_critical = + cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_CRITICAL); + cmd->interference_thresh_teardown = + cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN); + cmd->interference_thresh_setup = + cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_SETUP); + cmd->min_no_sta_setup = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_SETUP); + cmd->min_no_sta_teardown = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_TEARDOWN); + cmd->no_of_bcast_mcast_slots = + cpu_to_le32(ATH12K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS); + cmd->min_no_twt_slots = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_TWT_SLOTS); + cmd->max_no_sta_twt = cpu_to_le32(ATH12K_TWT_DEF_MAX_NO_STA_TWT); + cmd->mode_check_interval = cpu_to_le32(ATH12K_TWT_DEF_MODE_CHECK_INTERVAL); + cmd->add_sta_slot_interval = cpu_to_le32(ATH12K_TWT_DEF_ADD_STA_SLOT_INTERVAL); + cmd->remove_sta_slot_interval = + cpu_to_le32(ATH12K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL); + /* TODO add MBSSID support */ + cmd->mbss_support = 0; + + ret = ath12k_wmi_cmd_send(wmi, skb, + WMI_TWT_ENABLE_CMDID); + if (ret) { + ath12k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID"); + dev_kfree_skb(skb); + } + return ret; +} + +int +ath12k_wmi_send_twt_disable_cmd(struct ath12k *ar, u32 pdev_id) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct ath12k_base *ab = wmi->wmi_ab->ab; + struct wmi_twt_disable_params_cmd *cmd; + struct sk_buff *skb; + int ret, len; + + len = sizeof(*cmd); + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_twt_disable_params_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_DISABLE_CMD, + len); + cmd->pdev_id = cpu_to_le32(pdev_id); + + ret = ath12k_wmi_cmd_send(wmi, skb, + WMI_TWT_DISABLE_CMDID); + if (ret) { + ath12k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID"); + dev_kfree_skb(skb); + } + return ret; +} + +int +ath12k_wmi_send_obss_spr_cmd(struct ath12k *ar, u32 vdev_id, + struct ieee80211_he_obss_pd *he_obss_pd) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct ath12k_base *ab = wmi->wmi_ab->ab; + struct wmi_obss_spatial_reuse_params_cmd *cmd; + struct sk_buff *skb; + int ret, len; + + len = sizeof(*cmd); + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD, + len); + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->enable = cpu_to_le32(he_obss_pd->enable); + cmd->obss_min = a_cpu_to_sle32(he_obss_pd->min_offset); + cmd->obss_max = a_cpu_to_sle32(he_obss_pd->max_offset); + + ret = ath12k_wmi_cmd_send(wmi, skb, + 
WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID); + if (ret) { + ath12k_warn(ab, + "Failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID"); + dev_kfree_skb(skb); + } + return ret; +} + +int ath12k_wmi_obss_color_cfg_cmd(struct ath12k *ar, u32 vdev_id, + u8 bss_color, u32 period, + bool enable) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct ath12k_base *ab = wmi->wmi_ab->ab; + struct wmi_obss_color_collision_cfg_params_cmd *cmd; + struct sk_buff *skb; + int ret, len; + + len = sizeof(*cmd); + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG, + len); + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->evt_type = enable ? cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION) : + cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION_DISABLE); + cmd->current_bss_color = cpu_to_le32(bss_color); + cmd->detection_period_ms = cpu_to_le32(period); + cmd->scan_period_ms = cpu_to_le32(ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS); + cmd->free_slot_expiry_time_ms = 0; + cmd->flags = 0; + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "wmi_send_obss_color_collision_cfg id %d type %d bss_color %d detect_period %d scan_period %d\n", + cmd->vdev_id, cmd->evt_type, cmd->current_bss_color, + cmd->detection_period_ms, cmd->scan_period_ms); + + ret = ath12k_wmi_cmd_send(wmi, skb, + WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID); + if (ret) { + ath12k_warn(ab, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID"); + dev_kfree_skb(skb); + } + return ret; +} + +int ath12k_wmi_send_bss_color_change_enable_cmd(struct ath12k *ar, u32 vdev_id, + bool enable) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct ath12k_base *ab = wmi->wmi_ab->ab; + struct wmi_bss_color_change_enable_params_cmd *cmd; + struct sk_buff *skb; + int ret, len; + + len = sizeof(*cmd); + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BSS_COLOR_CHANGE_ENABLE, + len); + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->enable = enable ? 
cpu_to_le32(1) : 0; + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "wmi_send_bss_color_change_enable id %d enable %d\n", + cmd->vdev_id, cmd->enable); + + ret = ath12k_wmi_cmd_send(wmi, skb, + WMI_BSS_COLOR_CHANGE_ENABLE_CMDID); + if (ret) { + ath12k_warn(ab, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID"); + dev_kfree_skb(skb); + } + return ret; +} + +int ath12k_wmi_fils_discovery_tmpl(struct ath12k *ar, u32 vdev_id, + struct sk_buff *tmpl) +{ + struct wmi_tlv *tlv; + struct sk_buff *skb; + void *ptr; + int ret, len; + size_t aligned_len; + struct wmi_fils_discovery_tmpl_cmd *cmd; + + aligned_len = roundup(tmpl->len, 4); + len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len; + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "WMI vdev %i set FILS discovery template\n", vdev_id); + + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FILS_DISCOVERY_TMPL_CMD, + sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->buf_len = cpu_to_le32(tmpl->len); + ptr = skb->data + sizeof(*cmd); + + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len); + memcpy(tlv->value, tmpl->data, tmpl->len); + + ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_FILS_DISCOVERY_TMPL_CMDID); + if (ret) { + ath12k_warn(ar->ab, + "WMI vdev %i failed to send FILS discovery template command\n", + vdev_id); + dev_kfree_skb(skb); + } + return ret; +} + +int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id, + struct sk_buff *tmpl) +{ + struct wmi_probe_tmpl_cmd *cmd; + struct ath12k_wmi_bcn_prb_info_params *probe_info; + struct wmi_tlv *tlv; + struct sk_buff *skb; + void *ptr; + int ret, len; + size_t aligned_len = roundup(tmpl->len, 4); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "WMI vdev %i set probe response template\n", vdev_id); + + len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len; + + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_probe_tmpl_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PRB_TMPL_CMD, + sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->buf_len = cpu_to_le32(tmpl->len); + + ptr = skb->data + sizeof(*cmd); + + probe_info = ptr; + len = sizeof(*probe_info); + probe_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO, + len); + probe_info->caps = 0; + probe_info->erp = 0; + + ptr += sizeof(*probe_info); + + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len); + memcpy(tlv->value, tmpl->data, tmpl->len); + + ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID); + if (ret) { + ath12k_warn(ar->ab, + "WMI vdev %i failed to send probe response template command\n", + vdev_id); + dev_kfree_skb(skb); + } + return ret; +} + +int ath12k_wmi_fils_discovery(struct ath12k *ar, u32 vdev_id, u32 interval, + bool unsol_bcast_probe_resp_enabled) +{ + struct sk_buff *skb; + int ret, len; + struct wmi_fils_discovery_cmd *cmd; + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "WMI vdev %i set %s interval to %u TU\n", + vdev_id, unsol_bcast_probe_resp_enabled ? 
+ "unsolicited broadcast probe response" : "FILS discovery", + interval); + + len = sizeof(*cmd); + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_fils_discovery_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ENABLE_FILS_CMD, + len); + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->interval = cpu_to_le32(interval); + cmd->config = cpu_to_le32(unsol_bcast_probe_resp_enabled); + + ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID); + if (ret) { + ath12k_warn(ar->ab, + "WMI vdev %i failed to send FILS discovery enable/disable command\n", + vdev_id); + dev_kfree_skb(skb); + } + return ret; +} + +static void +ath12k_fill_band_to_mac_param(struct ath12k_base *soc, + struct ath12k_wmi_pdev_band_arg *arg) +{ + u8 i; + struct ath12k_wmi_hal_reg_capabilities_ext_arg *hal_reg_cap; + struct ath12k_pdev *pdev; + + for (i = 0; i < soc->num_radios; i++) { + pdev = &soc->pdevs[i]; + hal_reg_cap = &soc->hal_reg_cap[i]; + arg[i].pdev_id = pdev->pdev_id; + + switch (pdev->cap.supported_bands) { + case WMI_HOST_WLAN_2G_5G_CAP: + arg[i].start_freq = hal_reg_cap->low_2ghz_chan; + arg[i].end_freq = hal_reg_cap->high_5ghz_chan; + break; + case WMI_HOST_WLAN_2G_CAP: + arg[i].start_freq = hal_reg_cap->low_2ghz_chan; + arg[i].end_freq = hal_reg_cap->high_2ghz_chan; + break; + case WMI_HOST_WLAN_5G_CAP: + arg[i].start_freq = hal_reg_cap->low_5ghz_chan; + arg[i].end_freq = hal_reg_cap->high_5ghz_chan; + break; + default: + break; + } + } +} + +static void +ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cfg, + struct ath12k_wmi_resource_config_arg *tg_cfg) +{ + wmi_cfg->num_vdevs = cpu_to_le32(tg_cfg->num_vdevs); + wmi_cfg->num_peers = cpu_to_le32(tg_cfg->num_peers); + wmi_cfg->num_offload_peers = cpu_to_le32(tg_cfg->num_offload_peers); + wmi_cfg->num_offload_reorder_buffs = + cpu_to_le32(tg_cfg->num_offload_reorder_buffs); + wmi_cfg->num_peer_keys = cpu_to_le32(tg_cfg->num_peer_keys); + wmi_cfg->num_tids = cpu_to_le32(tg_cfg->num_tids); + wmi_cfg->ast_skid_limit = cpu_to_le32(tg_cfg->ast_skid_limit); + wmi_cfg->tx_chain_mask = cpu_to_le32(tg_cfg->tx_chain_mask); + wmi_cfg->rx_chain_mask = cpu_to_le32(tg_cfg->rx_chain_mask); + wmi_cfg->rx_timeout_pri[0] = cpu_to_le32(tg_cfg->rx_timeout_pri[0]); + wmi_cfg->rx_timeout_pri[1] = cpu_to_le32(tg_cfg->rx_timeout_pri[1]); + wmi_cfg->rx_timeout_pri[2] = cpu_to_le32(tg_cfg->rx_timeout_pri[2]); + wmi_cfg->rx_timeout_pri[3] = cpu_to_le32(tg_cfg->rx_timeout_pri[3]); + wmi_cfg->rx_decap_mode = cpu_to_le32(tg_cfg->rx_decap_mode); + wmi_cfg->scan_max_pending_req = cpu_to_le32(tg_cfg->scan_max_pending_req); + wmi_cfg->bmiss_offload_max_vdev = cpu_to_le32(tg_cfg->bmiss_offload_max_vdev); + wmi_cfg->roam_offload_max_vdev = cpu_to_le32(tg_cfg->roam_offload_max_vdev); + wmi_cfg->roam_offload_max_ap_profiles = + cpu_to_le32(tg_cfg->roam_offload_max_ap_profiles); + wmi_cfg->num_mcast_groups = cpu_to_le32(tg_cfg->num_mcast_groups); + wmi_cfg->num_mcast_table_elems = cpu_to_le32(tg_cfg->num_mcast_table_elems); + wmi_cfg->mcast2ucast_mode = cpu_to_le32(tg_cfg->mcast2ucast_mode); + wmi_cfg->tx_dbg_log_size = cpu_to_le32(tg_cfg->tx_dbg_log_size); + wmi_cfg->num_wds_entries = cpu_to_le32(tg_cfg->num_wds_entries); + wmi_cfg->dma_burst_size = cpu_to_le32(tg_cfg->dma_burst_size); + wmi_cfg->mac_aggr_delim = cpu_to_le32(tg_cfg->mac_aggr_delim); + wmi_cfg->rx_skip_defrag_timeout_dup_detection_check = + cpu_to_le32(tg_cfg->rx_skip_defrag_timeout_dup_detection_check); + wmi_cfg->vow_config 
= cpu_to_le32(tg_cfg->vow_config); + wmi_cfg->gtk_offload_max_vdev = cpu_to_le32(tg_cfg->gtk_offload_max_vdev); + wmi_cfg->num_msdu_desc = cpu_to_le32(tg_cfg->num_msdu_desc); + wmi_cfg->max_frag_entries = cpu_to_le32(tg_cfg->max_frag_entries); + wmi_cfg->num_tdls_vdevs = cpu_to_le32(tg_cfg->num_tdls_vdevs); + wmi_cfg->num_tdls_conn_table_entries = + cpu_to_le32(tg_cfg->num_tdls_conn_table_entries); + wmi_cfg->beacon_tx_offload_max_vdev = + cpu_to_le32(tg_cfg->beacon_tx_offload_max_vdev); + wmi_cfg->num_multicast_filter_entries = + cpu_to_le32(tg_cfg->num_multicast_filter_entries); + wmi_cfg->num_wow_filters = cpu_to_le32(tg_cfg->num_wow_filters); + wmi_cfg->num_keep_alive_pattern = cpu_to_le32(tg_cfg->num_keep_alive_pattern); + wmi_cfg->keep_alive_pattern_size = cpu_to_le32(tg_cfg->keep_alive_pattern_size); + wmi_cfg->max_tdls_concurrent_sleep_sta = + cpu_to_le32(tg_cfg->max_tdls_concurrent_sleep_sta); + wmi_cfg->max_tdls_concurrent_buffer_sta = + cpu_to_le32(tg_cfg->max_tdls_concurrent_buffer_sta); + wmi_cfg->wmi_send_separate = cpu_to_le32(tg_cfg->wmi_send_separate); + wmi_cfg->num_ocb_vdevs = cpu_to_le32(tg_cfg->num_ocb_vdevs); + wmi_cfg->num_ocb_channels = cpu_to_le32(tg_cfg->num_ocb_channels); + wmi_cfg->num_ocb_schedules = cpu_to_le32(tg_cfg->num_ocb_schedules); + wmi_cfg->bpf_instruction_size = cpu_to_le32(tg_cfg->bpf_instruction_size); + wmi_cfg->max_bssid_rx_filters = cpu_to_le32(tg_cfg->max_bssid_rx_filters); + wmi_cfg->use_pdev_id = cpu_to_le32(tg_cfg->use_pdev_id); + wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config); + wmi_cfg->peer_map_unmap_version = cpu_to_le32(tg_cfg->peer_map_unmap_version); + wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params); + wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count); + wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count); + wmi_cfg->host_service_flags = + cpu_to_le32(1 << WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT); +} + +static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi, + struct ath12k_wmi_init_cmd_arg *arg) +{ + struct ath12k_base *ab = wmi->wmi_ab->ab; + struct sk_buff *skb; + struct wmi_init_cmd *cmd; + struct ath12k_wmi_resource_config_params *cfg; + struct ath12k_wmi_pdev_set_hw_mode_cmd *hw_mode; + struct ath12k_wmi_pdev_band_to_mac_params *band_to_mac; + struct ath12k_wmi_host_mem_chunk_params *host_mem_chunks; + struct wmi_tlv *tlv; + size_t ret, len; + void *ptr; + u32 hw_mode_len = 0; + u16 idx; + + if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX) + hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE + + (arg->num_band_to_mac * sizeof(*band_to_mac)); + + len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len + + (arg->num_mem_chunks ? 
(sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0); + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_init_cmd *)skb->data; + + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_INIT_CMD, + sizeof(*cmd)); + + ptr = skb->data + sizeof(*cmd); + cfg = ptr; + + ath12k_wmi_copy_resource_config(cfg, &arg->res_cfg); + + cfg->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_RESOURCE_CONFIG, + sizeof(*cfg)); + + ptr += sizeof(*cfg); + host_mem_chunks = ptr + TLV_HDR_SIZE; + len = sizeof(struct ath12k_wmi_host_mem_chunk_params); + + for (idx = 0; idx < arg->num_mem_chunks; ++idx) { + host_mem_chunks[idx].tlv_header = + ath12k_wmi_tlv_hdr(WMI_TAG_WLAN_HOST_MEMORY_CHUNK, + len); + + host_mem_chunks[idx].ptr = cpu_to_le32(arg->mem_chunks[idx].paddr); + host_mem_chunks[idx].size = cpu_to_le32(arg->mem_chunks[idx].len); + host_mem_chunks[idx].req_id = cpu_to_le32(arg->mem_chunks[idx].req_id); + + ath12k_dbg(ab, ATH12K_DBG_WMI, + "WMI host mem chunk req_id %d paddr 0x%llx len %d\n", + arg->mem_chunks[idx].req_id, + (u64)arg->mem_chunks[idx].paddr, + arg->mem_chunks[idx].len); + } + cmd->num_host_mem_chunks = cpu_to_le32(arg->num_mem_chunks); + len = sizeof(struct ath12k_wmi_host_mem_chunk_params) * arg->num_mem_chunks; + + /* num_mem_chunks is zero */ + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len); + ptr += TLV_HDR_SIZE + len; + + if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX) { + hw_mode = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)ptr; + hw_mode->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD, + sizeof(*hw_mode)); + + hw_mode->hw_mode_index = cpu_to_le32(arg->hw_mode_id); + hw_mode->num_band_to_mac = cpu_to_le32(arg->num_band_to_mac); + + ptr += sizeof(*hw_mode); + + len = arg->num_band_to_mac * sizeof(*band_to_mac); + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len); + + ptr += TLV_HDR_SIZE; + len = sizeof(*band_to_mac); + + for (idx = 0; idx < arg->num_band_to_mac; idx++) { + band_to_mac = (void *)ptr; + + band_to_mac->tlv_header = + ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BAND_TO_MAC, + len); + band_to_mac->pdev_id = cpu_to_le32(arg->band_to_mac[idx].pdev_id); + band_to_mac->start_freq = + cpu_to_le32(arg->band_to_mac[idx].start_freq); + band_to_mac->end_freq = + cpu_to_le32(arg->band_to_mac[idx].end_freq); + ptr += sizeof(*band_to_mac); + } + } + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID); + if (ret) { + ath12k_warn(ab, "failed to send WMI_INIT_CMDID\n"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_pdev_lro_cfg(struct ath12k *ar, + int pdev_id) +{ + struct ath12k_wmi_pdev_lro_config_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct ath12k_wmi_pdev_lro_config_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_LRO_INFO_CMD, + sizeof(*cmd)); + + get_random_bytes(cmd->th_4, sizeof(cmd->th_4)); + get_random_bytes(cmd->th_6, sizeof(cmd->th_6)); + + cmd->pdev_id = cpu_to_le32(pdev_id); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "WMI lro cfg cmd pdev_id 0x%x\n", pdev_id); + + ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_LRO_CONFIG_CMDID); + if (ret) { + ath12k_warn(ar->ab, + "failed to send lro cfg req wmi cmd\n"); + goto err; + } + + return 0; +err: + dev_kfree_skb(skb); + return ret; +} + +int ath12k_wmi_wait_for_service_ready(struct ath12k_base *ab) +{ + unsigned long time_left; + + time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready, + 
WMI_SERVICE_READY_TIMEOUT_HZ); + if (!time_left) + return -ETIMEDOUT; + + return 0; +} + +int ath12k_wmi_wait_for_unified_ready(struct ath12k_base *ab) +{ + unsigned long time_left; + + time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready, + WMI_SERVICE_READY_TIMEOUT_HZ); + if (!time_left) + return -ETIMEDOUT; + + return 0; +} + +int ath12k_wmi_set_hw_mode(struct ath12k_base *ab, + enum wmi_host_hw_mode_config_type mode) +{ + struct ath12k_wmi_pdev_set_hw_mode_cmd *cmd; + struct sk_buff *skb; + struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab; + int len; + int ret; + + len = sizeof(*cmd); + + skb = ath12k_wmi_alloc_skb(wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)skb->data; + + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD, + sizeof(*cmd)); + + cmd->pdev_id = WMI_PDEV_ID_SOC; + cmd->hw_mode_index = cpu_to_le32(mode); + + ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID); + if (ret) { + ath12k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_cmd_init(struct ath12k_base *ab) +{ + struct ath12k_wmi_base *wmi_sc = &ab->wmi_ab; + struct ath12k_wmi_init_cmd_arg arg = {}; + + ab->hw_params->wmi_init(ab, &arg.res_cfg); + + arg.num_mem_chunks = wmi_sc->num_mem_chunks; + arg.hw_mode_id = wmi_sc->preferred_hw_mode; + arg.mem_chunks = wmi_sc->mem_chunks; + + if (ab->hw_params->single_pdev_only) + arg.hw_mode_id = WMI_HOST_HW_MODE_MAX; + + arg.num_band_to_mac = ab->num_radios; + ath12k_fill_band_to_mac_param(ab, arg.band_to_mac); + + return ath12k_init_cmd_send(&wmi_sc->wmi[0], &arg); +} + +int ath12k_wmi_vdev_spectral_conf(struct ath12k *ar, + struct ath12k_wmi_vdev_spectral_conf_arg *arg) +{ + struct ath12k_wmi_vdev_spectral_conf_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct ath12k_wmi_vdev_spectral_conf_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD, + sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(arg->vdev_id); + cmd->scan_count = cpu_to_le32(arg->scan_count); + cmd->scan_period = cpu_to_le32(arg->scan_period); + cmd->scan_priority = cpu_to_le32(arg->scan_priority); + cmd->scan_fft_size = cpu_to_le32(arg->scan_fft_size); + cmd->scan_gc_ena = cpu_to_le32(arg->scan_gc_ena); + cmd->scan_restart_ena = cpu_to_le32(arg->scan_restart_ena); + cmd->scan_noise_floor_ref = cpu_to_le32(arg->scan_noise_floor_ref); + cmd->scan_init_delay = cpu_to_le32(arg->scan_init_delay); + cmd->scan_nb_tone_thr = cpu_to_le32(arg->scan_nb_tone_thr); + cmd->scan_str_bin_thr = cpu_to_le32(arg->scan_str_bin_thr); + cmd->scan_wb_rpt_mode = cpu_to_le32(arg->scan_wb_rpt_mode); + cmd->scan_rssi_rpt_mode = cpu_to_le32(arg->scan_rssi_rpt_mode); + cmd->scan_rssi_thr = cpu_to_le32(arg->scan_rssi_thr); + cmd->scan_pwr_format = cpu_to_le32(arg->scan_pwr_format); + cmd->scan_rpt_mode = cpu_to_le32(arg->scan_rpt_mode); + cmd->scan_bin_scale = cpu_to_le32(arg->scan_bin_scale); + cmd->scan_dbm_adj = cpu_to_le32(arg->scan_dbm_adj); + cmd->scan_chn_mask = cpu_to_le32(arg->scan_chn_mask); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "WMI spectral scan config cmd vdev_id 0x%x\n", + arg->vdev_id); + + ret = ath12k_wmi_cmd_send(ar->wmi, skb, + WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID); + if (ret) { + ath12k_warn(ar->ab, + "failed to send spectral scan config wmi cmd\n"); + goto err; + } + + return 0; +err: + 
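+ /* ath12k_wmi_cmd_send() consumes the skb only on success, so the
+ * error path still owns the buffer and must free it
+ */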
dev_kfree_skb(skb); + return ret; +} + +int ath12k_wmi_vdev_spectral_enable(struct ath12k *ar, u32 vdev_id, + u32 trigger, u32 enable) +{ + struct ath12k_wmi_vdev_spectral_enable_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct ath12k_wmi_vdev_spectral_enable_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD, + sizeof(*cmd)); + + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->trigger_cmd = cpu_to_le32(trigger); + cmd->enable_cmd = cpu_to_le32(enable); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "WMI spectral enable cmd vdev id 0x%x\n", + vdev_id); + + ret = ath12k_wmi_cmd_send(ar->wmi, skb, + WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID); + if (ret) { + ath12k_warn(ar->ab, + "failed to send spectral enable wmi cmd\n"); + goto err; + } + + return 0; +err: + dev_kfree_skb(skb); + return ret; +} + +int ath12k_wmi_pdev_dma_ring_cfg(struct ath12k *ar, + struct ath12k_wmi_pdev_dma_ring_cfg_arg *arg) +{ + struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DMA_RING_CFG_REQ, + sizeof(*cmd)); + + cmd->pdev_id = cpu_to_le32(DP_SW2HW_MACID(arg->pdev_id)); + cmd->module_id = cpu_to_le32(arg->module_id); + cmd->base_paddr_lo = cpu_to_le32(arg->base_paddr_lo); + cmd->base_paddr_hi = cpu_to_le32(arg->base_paddr_hi); + cmd->head_idx_paddr_lo = cpu_to_le32(arg->head_idx_paddr_lo); + cmd->head_idx_paddr_hi = cpu_to_le32(arg->head_idx_paddr_hi); + cmd->tail_idx_paddr_lo = cpu_to_le32(arg->tail_idx_paddr_lo); + cmd->tail_idx_paddr_hi = cpu_to_le32(arg->tail_idx_paddr_hi); + cmd->num_elems = cpu_to_le32(arg->num_elems); + cmd->buf_size = cpu_to_le32(arg->buf_size); + cmd->num_resp_per_event = cpu_to_le32(arg->num_resp_per_event); + cmd->event_timeout_ms = cpu_to_le32(arg->event_timeout_ms); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "WMI DMA ring cfg req cmd pdev_id 0x%x\n", + arg->pdev_id); + + ret = ath12k_wmi_cmd_send(ar->wmi, skb, + WMI_PDEV_DMA_RING_CFG_REQ_CMDID); + if (ret) { + ath12k_warn(ar->ab, + "failed to send dma ring cfg req wmi cmd\n"); + goto err; + } + + return 0; +err: + dev_kfree_skb(skb); + return ret; +} + +static int ath12k_wmi_dma_buf_entry_parse(struct ath12k_base *soc, + u16 tag, u16 len, + const void *ptr, void *data) +{ + struct ath12k_wmi_dma_buf_release_arg *arg = data; + + if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY) + return -EPROTO; + + if (arg->num_buf_entry >= le32_to_cpu(arg->fixed.num_buf_release_entry)) + return -ENOBUFS; + + arg->num_buf_entry++; + return 0; +} + +static int ath12k_wmi_dma_buf_meta_parse(struct ath12k_base *soc, + u16 tag, u16 len, + const void *ptr, void *data) +{ + struct ath12k_wmi_dma_buf_release_arg *arg = data; + + if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA) + return -EPROTO; + + if (arg->num_meta >= le32_to_cpu(arg->fixed.num_meta_data_entry)) + return -ENOBUFS; + + arg->num_meta++; + + return 0; +} + +static int ath12k_wmi_dma_buf_parse(struct ath12k_base *ab, + u16 tag, u16 len, + const void *ptr, void *data) +{ + struct ath12k_wmi_dma_buf_release_arg *arg = data; + const struct ath12k_wmi_dma_buf_release_fixed_params *fixed; + u32 pdev_id; + int ret; + + switch (tag) { + case WMI_TAG_DMA_BUF_RELEASE: + fixed = ptr; + arg->fixed = *fixed; + pdev_id = 
DP_HW2SW_MACID(le32_to_cpu(fixed->pdev_id));
+ arg->fixed.pdev_id = cpu_to_le32(pdev_id);
+ break;
+ case WMI_TAG_ARRAY_STRUCT:
+ if (!arg->buf_entry_done) {
+ arg->num_buf_entry = 0;
+ arg->buf_entry = ptr;
+
+ ret = ath12k_wmi_tlv_iter(ab, ptr, len,
+ ath12k_wmi_dma_buf_entry_parse,
+ arg);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse dma buf entry tlv %d\n",
+ ret);
+ return ret;
+ }
+
+ arg->buf_entry_done = true;
+ } else if (!arg->meta_data_done) {
+ arg->num_meta = 0;
+ arg->meta_data = ptr;
+
+ ret = ath12k_wmi_tlv_iter(ab, ptr, len,
+ ath12k_wmi_dma_buf_meta_parse,
+ arg);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse dma buf meta tlv %d\n",
+ ret);
+ return ret;
+ }
+
+ arg->meta_data_done = true;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void ath12k_wmi_pdev_dma_ring_buf_release_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath12k_wmi_dma_buf_release_arg arg = {};
+ struct ath12k_dbring_buf_release_event param;
+ int ret;
+
+ ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath12k_wmi_dma_buf_parse,
+ &arg);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse dma buf release tlv %d\n", ret);
+ return;
+ }
+
+ param.fixed = arg.fixed;
+ param.buf_entry = arg.buf_entry;
+ param.num_buf_entry = arg.num_buf_entry;
+ param.meta_data = arg.meta_data;
+ param.num_meta = arg.num_meta;
+
+ ret = ath12k_dbring_buffer_release_event(ab, &param);
+ if (ret) {
+ ath12k_warn(ab, "failed to handle dma buf release event %d\n", ret);
+ return;
+ }
+}
+
+static int ath12k_wmi_hw_mode_caps_parse(struct ath12k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
+ struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap;
+ u32 phy_map = 0;
+
+ if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
+ return -EPROTO;
+
+ if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->arg.num_hw_modes)
+ return -ENOBUFS;
+
+ hw_mode_cap = container_of(ptr, struct ath12k_wmi_hw_mode_cap_params,
+ hw_mode_id);
+ svc_rdy_ext->n_hw_mode_caps++;
+
+ phy_map = le32_to_cpu(hw_mode_cap->phy_id_map);
+ svc_rdy_ext->tot_phy_id += fls(phy_map);
+
+ return 0;
+}
+
+static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc,
+ u16 len, const void *ptr, void *data)
+{
+ struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
+ const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
+ enum wmi_host_hw_mode_config_type mode, pref;
+ u32 i;
+ int ret;
+
+ svc_rdy_ext->n_hw_mode_caps = 0;
+ svc_rdy_ext->hw_mode_caps = ptr;
+
+ ret = ath12k_wmi_tlv_iter(soc, ptr, len,
+ ath12k_wmi_hw_mode_caps_parse,
+ svc_rdy_ext);
+ if (ret) {
+ ath12k_warn(soc, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0 ; i < svc_rdy_ext->n_hw_mode_caps; i++) {
+ hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
+ mode = le32_to_cpu(hw_mode_caps->hw_mode_id);
+ pref = soc->wmi_ab.preferred_hw_mode;
+
+ if (ath12k_hw_mode_pri_map[mode] < ath12k_hw_mode_pri_map[pref]) {
+ svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
+ soc->wmi_ab.preferred_hw_mode = mode;
+ }
+ }
+
+ ath12k_dbg(soc, ATH12K_DBG_WMI, "preferred_hw_mode:%d\n",
+ soc->wmi_ab.preferred_hw_mode);
+ if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ath12k_wmi_mac_phy_caps_parse(struct ath12k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
+
+ if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
+ return -EPROTO;
+
+ if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
+ return -ENOBUFS;
+
+ len = min_t(u16, len, sizeof(struct ath12k_wmi_mac_phy_caps_params));
+ if (!svc_rdy_ext->n_mac_phy_caps) {
+ svc_rdy_ext->mac_phy_caps = kzalloc((svc_rdy_ext->tot_phy_id) * len,
+ GFP_ATOMIC);
+ if (!svc_rdy_ext->mac_phy_caps)
+ return -ENOMEM;
+ }
+
+ memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len);
+ svc_rdy_ext->n_mac_phy_caps++;
+ return 0;
+}
+
+static int ath12k_wmi_ext_hal_reg_caps_parse(struct ath12k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
+
+ if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
+ return -EPROTO;
+
+ if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->arg.num_phy)
+ return -ENOBUFS;
+
+ svc_rdy_ext->n_ext_hal_reg_caps++;
+ return 0;
+}
+
+static int ath12k_wmi_ext_hal_reg_caps(struct ath12k_base *soc,
+ u16 len, const void *ptr, void *data)
+{
+ struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
+ struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
+ struct ath12k_wmi_hal_reg_capabilities_ext_arg reg_cap;
+ int ret;
+ u32 i;
+
+ svc_rdy_ext->n_ext_hal_reg_caps = 0;
+ svc_rdy_ext->ext_hal_reg_caps = ptr;
+ ret = ath12k_wmi_tlv_iter(soc, ptr, len,
+ ath12k_wmi_ext_hal_reg_caps_parse,
+ svc_rdy_ext);
+ if (ret) {
+ ath12k_warn(soc, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < svc_rdy_ext->arg.num_phy; i++) {
+ ret = ath12k_pull_reg_cap_svc_rdy_ext(wmi_handle,
+ svc_rdy_ext->soc_hal_reg_caps,
+ svc_rdy_ext->ext_hal_reg_caps, i,
+ &reg_cap);
+ if (ret) {
+ ath12k_warn(soc, "failed to extract reg cap %d\n", i);
+ return ret;
+ }
+ soc->hal_reg_cap[reg_cap.phy_id] = reg_cap;
+ }
+ return 0;
+}
+
+static int ath12k_wmi_ext_soc_hal_reg_caps_parse(struct ath12k_base *soc,
+ u16 len, const void *ptr,
+ void *data)
+{
+ struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
+ struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
+ u8 hw_mode_id = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.hw_mode_id);
+ u32 phy_id_map;
+ int pdev_index = 0;
+ int ret;
+
+ svc_rdy_ext->soc_hal_reg_caps = ptr;
+ svc_rdy_ext->arg.num_phy = le32_to_cpu(svc_rdy_ext->soc_hal_reg_caps->num_phy);
+
+ soc->num_radios = 0;
+ phy_id_map = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.phy_id_map);
+
+ while (phy_id_map && soc->num_radios < MAX_RADIOS) {
+ ret = ath12k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
+ svc_rdy_ext,
+ hw_mode_id, soc->num_radios,
+ &soc->pdevs[pdev_index]);
+ if (ret) {
+ ath12k_warn(soc, "failed to extract mac caps, idx :%d\n",
+ soc->num_radios);
+ return ret;
+ }
+
+ soc->num_radios++;
+
+ /* For single_pdev_only targets,
+ * save mac_phy capability in the same pdev
+ */
+ if (soc->hw_params->single_pdev_only)
+ pdev_index = 0;
+ else
+ pdev_index = soc->num_radios;
+
+ /* TODO: mac_phy_cap prints */
+ phy_id_map >>= 1;
+ }
+
+ if (soc->hw_params->single_pdev_only) {
+ soc->num_radios = 1;
+ soc->pdevs[0].pdev_id = 0;
+ }
+
+ return 0;
+}
+
+static int ath12k_wmi_dma_ring_caps_parse(struct ath12k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct ath12k_wmi_dma_ring_caps_parse *parse = data;
+
+ if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
+ return -EPROTO;
+
+ parse->n_dma_ring_caps++;
+ return 0;
+}
+
+static int ath12k_wmi_alloc_dbring_caps(struct ath12k_base *ab,
+ u32 num_cap)
+{
+ size_t sz;
+ void *ptr;
+
+ sz = num_cap * sizeof(struct ath12k_dbring_cap);
+ ptr = kzalloc(sz, GFP_ATOMIC);
+ if (!ptr)
+ return -ENOMEM;
+
+ ab->db_caps = ptr;
+ ab->num_db_cap = num_cap;
+
+ return 0;
+}
+
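Every tlv_header written in these hunks packs a tag and a byte length into a
single __le32. As a reference for that convention, here is a minimal sketch;
the struct wmi_tlv layout and the WMI_TLV_TAG/WMI_TLV_LEN masks are
assumptions carried over from the ath11k-style TLV format (the real
definitions live in wmi.h/wmi.c outside this hunk), and the command variant
stores the length minus the 4-byte header, matching the sizeof(*cmd) calls
used throughout:

    /* sketch only; needs <linux/bitfield.h> for le32_encode_bits() */
    struct wmi_tlv {
            __le32 header;          /* tag[31:16] | len[15:0] (assumed) */
            u8 value[];
    } __packed;

    #define WMI_TLV_LEN     GENMASK(15, 0)
    #define WMI_TLV_TAG     GENMASK(31, 16)
    #define TLV_HDR_SIZE    sizeof_field(struct wmi_tlv, header)

    static __le32 example_tlv_hdr(u32 tag, u32 len)
    {
            return le32_encode_bits(tag, WMI_TLV_TAG) |
                   le32_encode_bits(len, WMI_TLV_LEN);
    }

    static __le32 example_tlv_cmd_hdr(u32 tag, u32 len)
    {
            /* a command header describes the payload that follows the
             * header itself, hence the subtraction
             */
            return example_tlv_hdr(tag, len - TLV_HDR_SIZE);
    }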
+static void ath12k_wmi_free_dbring_caps(struct ath12k_base *ab) +{ + kfree(ab->db_caps); + ab->db_caps = NULL; +} + +static int ath12k_wmi_dma_ring_caps(struct ath12k_base *ab, + u16 len, const void *ptr, void *data) +{ + struct ath12k_wmi_dma_ring_caps_parse *dma_caps_parse = data; + struct ath12k_wmi_dma_ring_caps_params *dma_caps; + struct ath12k_dbring_cap *dir_buff_caps; + int ret; + u32 i; + + dma_caps_parse->n_dma_ring_caps = 0; + dma_caps = (struct ath12k_wmi_dma_ring_caps_params *)ptr; + ret = ath12k_wmi_tlv_iter(ab, ptr, len, + ath12k_wmi_dma_ring_caps_parse, + dma_caps_parse); + if (ret) { + ath12k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret); + return ret; + } + + if (!dma_caps_parse->n_dma_ring_caps) + return 0; + + if (ab->num_db_cap) { + ath12k_warn(ab, "Already processed, so ignoring dma ring caps\n"); + return 0; + } + + ret = ath12k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps); + if (ret) + return ret; + + dir_buff_caps = ab->db_caps; + for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) { + if (le32_to_cpu(dma_caps[i].module_id) >= WMI_DIRECT_BUF_MAX) { + ath12k_warn(ab, "Invalid module id %d\n", + le32_to_cpu(dma_caps[i].module_id)); + ret = -EINVAL; + goto free_dir_buff; + } + + dir_buff_caps[i].id = le32_to_cpu(dma_caps[i].module_id); + dir_buff_caps[i].pdev_id = + DP_HW2SW_MACID(le32_to_cpu(dma_caps[i].pdev_id)); + dir_buff_caps[i].min_elem = le32_to_cpu(dma_caps[i].min_elem); + dir_buff_caps[i].min_buf_sz = le32_to_cpu(dma_caps[i].min_buf_sz); + dir_buff_caps[i].min_buf_align = le32_to_cpu(dma_caps[i].min_buf_align); + } + + return 0; + +free_dir_buff: + ath12k_wmi_free_dbring_caps(ab); + return ret; +} + +static int ath12k_wmi_svc_rdy_ext_parse(struct ath12k_base *ab, + u16 tag, u16 len, + const void *ptr, void *data) +{ + struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0]; + struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data; + int ret; + + switch (tag) { + case WMI_TAG_SERVICE_READY_EXT_EVENT: + ret = ath12k_pull_svc_ready_ext(wmi_handle, ptr, + &svc_rdy_ext->arg); + if (ret) { + ath12k_warn(ab, "unable to extract ext params\n"); + return ret; + } + break; + + case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS: + svc_rdy_ext->hw_caps = ptr; + svc_rdy_ext->arg.num_hw_modes = + le32_to_cpu(svc_rdy_ext->hw_caps->num_hw_modes); + break; + + case WMI_TAG_SOC_HAL_REG_CAPABILITIES: + ret = ath12k_wmi_ext_soc_hal_reg_caps_parse(ab, len, ptr, + svc_rdy_ext); + if (ret) + return ret; + break; + + case WMI_TAG_ARRAY_STRUCT: + if (!svc_rdy_ext->hw_mode_done) { + ret = ath12k_wmi_hw_mode_caps(ab, len, ptr, svc_rdy_ext); + if (ret) + return ret; + + svc_rdy_ext->hw_mode_done = true; + } else if (!svc_rdy_ext->mac_phy_done) { + svc_rdy_ext->n_mac_phy_caps = 0; + ret = ath12k_wmi_tlv_iter(ab, ptr, len, + ath12k_wmi_mac_phy_caps_parse, + svc_rdy_ext); + if (ret) { + ath12k_warn(ab, "failed to parse tlv %d\n", ret); + return ret; + } + + svc_rdy_ext->mac_phy_done = true; + } else if (!svc_rdy_ext->ext_hal_reg_done) { + ret = ath12k_wmi_ext_hal_reg_caps(ab, len, ptr, svc_rdy_ext); + if (ret) + return ret; + + svc_rdy_ext->ext_hal_reg_done = true; + } else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) { + svc_rdy_ext->mac_phy_chainmask_combo_done = true; + } else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) { + svc_rdy_ext->mac_phy_chainmask_cap_done = true; + } else if (!svc_rdy_ext->oem_dma_ring_cap_done) { + svc_rdy_ext->oem_dma_ring_cap_done = true; + } else if (!svc_rdy_ext->dma_ring_cap_done) { + ret = ath12k_wmi_dma_ring_caps(ab, len, ptr, + 
&svc_rdy_ext->dma_caps_parse); + if (ret) + return ret; + + svc_rdy_ext->dma_ring_cap_done = true; + } + break; + + default: + break; + } + return 0; +} + +static int ath12k_service_ready_ext_event(struct ath12k_base *ab, + struct sk_buff *skb) +{ + struct ath12k_wmi_svc_rdy_ext_parse svc_rdy_ext = { }; + int ret; + + ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, + ath12k_wmi_svc_rdy_ext_parse, + &svc_rdy_ext); + if (ret) { + ath12k_warn(ab, "failed to parse tlv %d\n", ret); + goto err; + } + + if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map)) + complete(&ab->wmi_ab.service_ready); + + kfree(svc_rdy_ext.mac_phy_caps); + return 0; + +err: + ath12k_wmi_free_dbring_caps(ab); + return ret; +} + +static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab, + u16 tag, u16 len, + const void *ptr, void *data) +{ + struct ath12k_wmi_svc_rdy_ext2_parse *parse = data; + int ret; + + switch (tag) { + case WMI_TAG_ARRAY_STRUCT: + if (!parse->dma_ring_cap_done) { + ret = ath12k_wmi_dma_ring_caps(ab, len, ptr, + &parse->dma_caps_parse); + if (ret) + return ret; + + parse->dma_ring_cap_done = true; + } + break; + default: + break; + } + + return 0; +} + +static int ath12k_service_ready_ext2_event(struct ath12k_base *ab, + struct sk_buff *skb) +{ + struct ath12k_wmi_svc_rdy_ext2_parse svc_rdy_ext2 = { }; + int ret; + + ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, + ath12k_wmi_svc_rdy_ext2_parse, + &svc_rdy_ext2); + if (ret) { + ath12k_warn(ab, "failed to parse ext2 event tlv %d\n", ret); + goto err; + } + + complete(&ab->wmi_ab.service_ready); + + return 0; + +err: + ath12k_wmi_free_dbring_caps(ab); + return ret; +} + +static int ath12k_pull_vdev_start_resp_tlv(struct ath12k_base *ab, struct sk_buff *skb, + struct wmi_vdev_start_resp_event *vdev_rsp) +{ + const void **tb; + const struct wmi_vdev_start_resp_event *ev; + int ret; + + tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath12k_warn(ab, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT]; + if (!ev) { + ath12k_warn(ab, "failed to fetch vdev start resp ev"); + kfree(tb); + return -EPROTO; + } + + *vdev_rsp = *ev; + + kfree(tb); + return 0; +} + +static struct ath12k_reg_rule +*create_ext_reg_rules_from_wmi(u32 num_reg_rules, + struct ath12k_wmi_reg_rule_ext_params *wmi_reg_rule) +{ + struct ath12k_reg_rule *reg_rule_ptr; + u32 count; + + reg_rule_ptr = kzalloc((num_reg_rules * sizeof(*reg_rule_ptr)), + GFP_ATOMIC); + + if (!reg_rule_ptr) + return NULL; + + for (count = 0; count < num_reg_rules; count++) { + reg_rule_ptr[count].start_freq = + le32_get_bits(wmi_reg_rule[count].freq_info, + REG_RULE_START_FREQ); + reg_rule_ptr[count].end_freq = + le32_get_bits(wmi_reg_rule[count].freq_info, + REG_RULE_END_FREQ); + reg_rule_ptr[count].max_bw = + le32_get_bits(wmi_reg_rule[count].bw_pwr_info, + REG_RULE_MAX_BW); + reg_rule_ptr[count].reg_power = + le32_get_bits(wmi_reg_rule[count].bw_pwr_info, + REG_RULE_REG_PWR); + reg_rule_ptr[count].ant_gain = + le32_get_bits(wmi_reg_rule[count].bw_pwr_info, + REG_RULE_ANT_GAIN); + reg_rule_ptr[count].flags = + le32_get_bits(wmi_reg_rule[count].flag_info, + REG_RULE_FLAGS); + reg_rule_ptr[count].psd_flag = + le32_get_bits(wmi_reg_rule[count].psd_power_info, + REG_RULE_PSD_INFO); + reg_rule_ptr[count].psd_eirp = + le32_get_bits(wmi_reg_rule[count].psd_power_info, + REG_RULE_PSD_EIRP); + } + + return reg_rule_ptr; +} + +static int ath12k_pull_reg_chan_list_ext_update_ev(struct 
ath12k_base *ab, + struct sk_buff *skb, + struct ath12k_reg_info *reg_info) +{ + const void **tb; + const struct wmi_reg_chan_list_cc_ext_event *ev; + struct ath12k_wmi_reg_rule_ext_params *ext_wmi_reg_rule; + u32 num_2g_reg_rules, num_5g_reg_rules; + u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE]; + u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE]; + u32 total_reg_rules = 0; + int ret, i, j; + + ath12k_dbg(ab, ATH12K_DBG_WMI, "processing regulatory ext channel list\n"); + + tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath12k_warn(ab, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT]; + if (!ev) { + ath12k_warn(ab, "failed to fetch reg chan list ext update ev\n"); + kfree(tb); + return -EPROTO; + } + + reg_info->num_2g_reg_rules = le32_to_cpu(ev->num_2g_reg_rules); + reg_info->num_5g_reg_rules = le32_to_cpu(ev->num_5g_reg_rules); + reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] = + le32_to_cpu(ev->num_6g_reg_rules_ap_lpi); + reg_info->num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP] = + le32_to_cpu(ev->num_6g_reg_rules_ap_sp); + reg_info->num_6g_reg_rules_ap[WMI_REG_VLP_AP] = + le32_to_cpu(ev->num_6g_reg_rules_ap_vlp); + + for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { + reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] = + le32_to_cpu(ev->num_6g_reg_rules_cl_lpi[i]); + reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] = + le32_to_cpu(ev->num_6g_reg_rules_cl_sp[i]); + reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] = + le32_to_cpu(ev->num_6g_reg_rules_cl_vlp[i]); + } + + num_2g_reg_rules = reg_info->num_2g_reg_rules; + total_reg_rules += num_2g_reg_rules; + num_5g_reg_rules = reg_info->num_5g_reg_rules; + total_reg_rules += num_5g_reg_rules; + + if (num_2g_reg_rules > MAX_REG_RULES || num_5g_reg_rules > MAX_REG_RULES) { + ath12k_warn(ab, "Num reg rules for 2G/5G exceeds max limit (num_2g_reg_rules: %d num_5g_reg_rules: %d max_rules: %d)\n", + num_2g_reg_rules, num_5g_reg_rules, MAX_REG_RULES); + kfree(tb); + return -EINVAL; + } + + for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) { + num_6g_reg_rules_ap[i] = reg_info->num_6g_reg_rules_ap[i]; + + if (num_6g_reg_rules_ap[i] > MAX_6G_REG_RULES) { + ath12k_warn(ab, "Num 6G reg rules for AP mode(%d) exceeds max limit (num_6g_reg_rules_ap: %d, max_rules: %d)\n", + i, num_6g_reg_rules_ap[i], MAX_6G_REG_RULES); + kfree(tb); + return -EINVAL; + } + + total_reg_rules += num_6g_reg_rules_ap[i]; + } + + for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { + num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] = + reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i]; + total_reg_rules += num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i]; + + num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] = + reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i]; + total_reg_rules += num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i]; + + num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] = + reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i]; + total_reg_rules += num_6g_reg_rules_cl[WMI_REG_VLP_AP][i]; + + if (num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] > MAX_6G_REG_RULES || + num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] > MAX_6G_REG_RULES || + num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] > MAX_6G_REG_RULES) { + ath12k_warn(ab, "Num 6g client reg rules exceeds max limit, for client(type: %d)\n", + i); + kfree(tb); + return -EINVAL; + } + } + + if (!total_reg_rules) { + ath12k_warn(ab, "No reg rules available\n"); + kfree(tb); + return -EINVAL; + } + + 
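+ /* all per-band rule counts are bounds-checked above; the walk below
+ * can safely consume total_reg_rules entries from the TLV that
+ * follows the fixed event parameters
+ */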
memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN); + + /* FIXME: Currently FW includes 6G reg rule also in 5G rule + * list for country US. + * Having same 6G reg rule in 5G and 6G rules list causes + * intersect check to be true, and same rules will be shown + * multiple times in iw cmd. So added hack below to avoid + * parsing 6G rule from 5G reg rule list, and this can be + * removed later, after FW updates to remove 6G reg rule + * from 5G rules list. + */ + if (memcmp(reg_info->alpha2, "US", 2) == 0) { + reg_info->num_5g_reg_rules = REG_US_5G_NUM_REG_RULES; + num_5g_reg_rules = reg_info->num_5g_reg_rules; + } + + reg_info->dfs_region = le32_to_cpu(ev->dfs_region); + reg_info->phybitmap = le32_to_cpu(ev->phybitmap); + reg_info->num_phy = le32_to_cpu(ev->num_phy); + reg_info->phy_id = le32_to_cpu(ev->phy_id); + reg_info->ctry_code = le32_to_cpu(ev->country_id); + reg_info->reg_dmn_pair = le32_to_cpu(ev->domain_code); + + switch (le32_to_cpu(ev->status_code)) { + case WMI_REG_SET_CC_STATUS_PASS: + reg_info->status_code = REG_SET_CC_STATUS_PASS; + break; + case WMI_REG_CURRENT_ALPHA2_NOT_FOUND: + reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND; + break; + case WMI_REG_INIT_ALPHA2_NOT_FOUND: + reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND; + break; + case WMI_REG_SET_CC_CHANGE_NOT_ALLOWED: + reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED; + break; + case WMI_REG_SET_CC_STATUS_NO_MEMORY: + reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY; + break; + case WMI_REG_SET_CC_STATUS_FAIL: + reg_info->status_code = REG_SET_CC_STATUS_FAIL; + break; + } + + reg_info->is_ext_reg_event = true; + + reg_info->min_bw_2g = le32_to_cpu(ev->min_bw_2g); + reg_info->max_bw_2g = le32_to_cpu(ev->max_bw_2g); + reg_info->min_bw_5g = le32_to_cpu(ev->min_bw_5g); + reg_info->max_bw_5g = le32_to_cpu(ev->max_bw_5g); + reg_info->min_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->min_bw_6g_ap_lpi); + reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->max_bw_6g_ap_lpi); + reg_info->min_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->min_bw_6g_ap_sp); + reg_info->max_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->max_bw_6g_ap_sp); + reg_info->min_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->min_bw_6g_ap_vlp); + reg_info->max_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->max_bw_6g_ap_vlp); + + for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { + reg_info->min_bw_6g_client[WMI_REG_INDOOR_AP][i] = + le32_to_cpu(ev->min_bw_6g_client_lpi[i]); + reg_info->max_bw_6g_client[WMI_REG_INDOOR_AP][i] = + le32_to_cpu(ev->max_bw_6g_client_lpi[i]); + reg_info->min_bw_6g_client[WMI_REG_STD_POWER_AP][i] = + le32_to_cpu(ev->min_bw_6g_client_sp[i]); + reg_info->max_bw_6g_client[WMI_REG_STD_POWER_AP][i] = + le32_to_cpu(ev->max_bw_6g_client_sp[i]); + reg_info->min_bw_6g_client[WMI_REG_VLP_AP][i] = + le32_to_cpu(ev->min_bw_6g_client_vlp[i]); + reg_info->max_bw_6g_client[WMI_REG_VLP_AP][i] = + le32_to_cpu(ev->max_bw_6g_client_vlp[i]); + } + + ath12k_dbg(ab, ATH12K_DBG_WMI, + "%s:cc_ext %s dsf %d BW: min_2g %d max_2g %d min_5g %d max_5g %d", + __func__, reg_info->alpha2, reg_info->dfs_region, + reg_info->min_bw_2g, reg_info->max_bw_2g, + reg_info->min_bw_5g, reg_info->max_bw_5g); + + ath12k_dbg(ab, ATH12K_DBG_WMI, + "num_2g_reg_rules %d num_5g_reg_rules %d", + num_2g_reg_rules, num_5g_reg_rules); + + ath12k_dbg(ab, ATH12K_DBG_WMI, + "num_6g_reg_rules_ap_lpi: %d num_6g_reg_rules_ap_sp: %d num_6g_reg_rules_ap_vlp: %d", + num_6g_reg_rules_ap[WMI_REG_INDOOR_AP], + num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP], + 
num_6g_reg_rules_ap[WMI_REG_VLP_AP]); + + ath12k_dbg(ab, ATH12K_DBG_WMI, + "6g Regular client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d", + num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_DEFAULT_CLIENT], + num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_DEFAULT_CLIENT], + num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_DEFAULT_CLIENT]); + + ath12k_dbg(ab, ATH12K_DBG_WMI, + "6g Subordinate client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d", + num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_SUBORDINATE_CLIENT], + num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_SUBORDINATE_CLIENT], + num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_SUBORDINATE_CLIENT]); + + ext_wmi_reg_rule = + (struct ath12k_wmi_reg_rule_ext_params *)((u8 *)ev + + sizeof(*ev) + + sizeof(struct wmi_tlv)); + + if (num_2g_reg_rules) { + reg_info->reg_rules_2g_ptr = + create_ext_reg_rules_from_wmi(num_2g_reg_rules, + ext_wmi_reg_rule); + + if (!reg_info->reg_rules_2g_ptr) { + kfree(tb); + ath12k_warn(ab, "Unable to Allocate memory for 2g rules\n"); + return -ENOMEM; + } + } + + if (num_5g_reg_rules) { + ext_wmi_reg_rule += num_2g_reg_rules; + reg_info->reg_rules_5g_ptr = + create_ext_reg_rules_from_wmi(num_5g_reg_rules, + ext_wmi_reg_rule); + + if (!reg_info->reg_rules_5g_ptr) { + kfree(tb); + ath12k_warn(ab, "Unable to Allocate memory for 5g rules\n"); + return -ENOMEM; + } + } + + ext_wmi_reg_rule += num_5g_reg_rules; + + for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) { + reg_info->reg_rules_6g_ap_ptr[i] = + create_ext_reg_rules_from_wmi(num_6g_reg_rules_ap[i], + ext_wmi_reg_rule); + + if (!reg_info->reg_rules_6g_ap_ptr[i]) { + kfree(tb); + ath12k_warn(ab, "Unable to Allocate memory for 6g ap rules\n"); + return -ENOMEM; + } + + ext_wmi_reg_rule += num_6g_reg_rules_ap[i]; + } + + for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) { + for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { + reg_info->reg_rules_6g_client_ptr[j][i] = + create_ext_reg_rules_from_wmi(num_6g_reg_rules_cl[j][i], + ext_wmi_reg_rule); + + if (!reg_info->reg_rules_6g_client_ptr[j][i]) { + kfree(tb); + ath12k_warn(ab, "Unable to Allocate memory for 6g client rules\n"); + return -ENOMEM; + } + + ext_wmi_reg_rule += num_6g_reg_rules_cl[j][i]; + } + } + + reg_info->client_type = le32_to_cpu(ev->client_type); + reg_info->rnr_tpe_usable = ev->rnr_tpe_usable; + reg_info->unspecified_ap_usable = ev->unspecified_ap_usable; + reg_info->domain_code_6g_ap[WMI_REG_INDOOR_AP] = + le32_to_cpu(ev->domain_code_6g_ap_lpi); + reg_info->domain_code_6g_ap[WMI_REG_STD_POWER_AP] = + le32_to_cpu(ev->domain_code_6g_ap_sp); + reg_info->domain_code_6g_ap[WMI_REG_VLP_AP] = + le32_to_cpu(ev->domain_code_6g_ap_vlp); + + for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { + reg_info->domain_code_6g_client[WMI_REG_INDOOR_AP][i] = + le32_to_cpu(ev->domain_code_6g_client_lpi[i]); + reg_info->domain_code_6g_client[WMI_REG_STD_POWER_AP][i] = + le32_to_cpu(ev->domain_code_6g_client_sp[i]); + reg_info->domain_code_6g_client[WMI_REG_VLP_AP][i] = + le32_to_cpu(ev->domain_code_6g_client_vlp[i]); + } + + reg_info->domain_code_6g_super_id = le32_to_cpu(ev->domain_code_6g_super_id); + + ath12k_dbg(ab, ATH12K_DBG_WMI, "6g client_type: %d domain_code_6g_super_id: %d", + reg_info->client_type, reg_info->domain_code_6g_super_id); + + ath12k_dbg(ab, ATH12K_DBG_WMI, "processed regulatory ext channel list\n"); + + kfree(tb); + return 0; +} + +static int ath12k_pull_peer_del_resp_ev(struct ath12k_base *ab, struct sk_buff *skb, + struct 
wmi_peer_delete_resp_event *peer_del_resp) +{ + const void **tb; + const struct wmi_peer_delete_resp_event *ev; + int ret; + + tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath12k_warn(ab, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT]; + if (!ev) { + ath12k_warn(ab, "failed to fetch peer delete resp ev"); + kfree(tb); + return -EPROTO; + } + + memset(peer_del_resp, 0, sizeof(*peer_del_resp)); + + peer_del_resp->vdev_id = ev->vdev_id; + ether_addr_copy(peer_del_resp->peer_macaddr.addr, + ev->peer_macaddr.addr); + + kfree(tb); + return 0; +} + +static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab, + struct sk_buff *skb, + u32 *vdev_id) +{ + const void **tb; + const struct wmi_vdev_delete_resp_event *ev; + int ret; + + tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath12k_warn(ab, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT]; + if (!ev) { + ath12k_warn(ab, "failed to fetch vdev delete resp ev"); + kfree(tb); + return -EPROTO; + } + + *vdev_id = le32_to_cpu(ev->vdev_id); + + kfree(tb); + return 0; +} + +static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab, void *evt_buf, + u32 len, u32 *vdev_id, + u32 *tx_status) +{ + const void **tb; + const struct wmi_bcn_tx_status_event *ev; + int ret; + + tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath12k_warn(ab, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT]; + if (!ev) { + ath12k_warn(ab, "failed to fetch bcn tx status ev"); + kfree(tb); + return -EPROTO; + } + + *vdev_id = le32_to_cpu(ev->vdev_id); + *tx_status = le32_to_cpu(ev->tx_status); + + kfree(tb); + return 0; +} + +static int ath12k_pull_vdev_stopped_param_tlv(struct ath12k_base *ab, struct sk_buff *skb, + u32 *vdev_id) +{ + const void **tb; + const struct wmi_vdev_stopped_event *ev; + int ret; + + tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath12k_warn(ab, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TAG_VDEV_STOPPED_EVENT]; + if (!ev) { + ath12k_warn(ab, "failed to fetch vdev stop ev"); + kfree(tb); + return -EPROTO; + } + + *vdev_id = le32_to_cpu(ev->vdev_id); + + kfree(tb); + return 0; +} + +static int ath12k_wmi_tlv_mgmt_rx_parse(struct ath12k_base *ab, + u16 tag, u16 len, + const void *ptr, void *data) +{ + struct wmi_tlv_mgmt_rx_parse *parse = data; + + switch (tag) { + case WMI_TAG_MGMT_RX_HDR: + parse->fixed = ptr; + break; + case WMI_TAG_ARRAY_BYTE: + if (!parse->frame_buf_done) { + parse->frame_buf = ptr; + parse->frame_buf_done = true; + } + break; + } + return 0; +} + +static int ath12k_pull_mgmt_rx_params_tlv(struct ath12k_base *ab, + struct sk_buff *skb, + struct ath12k_wmi_mgmt_rx_arg *hdr) +{ + struct wmi_tlv_mgmt_rx_parse parse = { }; + const struct ath12k_wmi_mgmt_rx_params *ev; + const u8 *frame; + int i, ret; + + ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, + ath12k_wmi_tlv_mgmt_rx_parse, + &parse); + if (ret) { + ath12k_warn(ab, "failed to parse mgmt rx tlv %d\n", ret); + return ret; + } + + ev = parse.fixed; + frame = parse.frame_buf; + + if (!ev || !frame) { + ath12k_warn(ab, "failed to fetch mgmt rx hdr"); + return -EPROTO; + } + + hdr->pdev_id = le32_to_cpu(ev->pdev_id); + hdr->chan_freq = 
le32_to_cpu(ev->chan_freq); + hdr->channel = le32_to_cpu(ev->channel); + hdr->snr = le32_to_cpu(ev->snr); + hdr->rate = le32_to_cpu(ev->rate); + hdr->phy_mode = le32_to_cpu(ev->phy_mode); + hdr->buf_len = le32_to_cpu(ev->buf_len); + hdr->status = le32_to_cpu(ev->status); + hdr->flags = le32_to_cpu(ev->flags); + hdr->rssi = a_sle32_to_cpu(ev->rssi); + hdr->tsf_delta = le32_to_cpu(ev->tsf_delta); + + for (i = 0; i < ATH_MAX_ANTENNA; i++) + hdr->rssi_ctl[i] = le32_to_cpu(ev->rssi_ctl[i]); + + if (skb->len < (frame - skb->data) + hdr->buf_len) { + ath12k_warn(ab, "invalid length in mgmt rx hdr ev"); + return -EPROTO; + } + + /* shift the sk_buff to point to `frame` */ + skb_trim(skb, 0); + skb_put(skb, frame - skb->data); + skb_pull(skb, frame - skb->data); + skb_put(skb, hdr->buf_len); + + return 0; +} + +static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id, + u32 status) +{ + struct sk_buff *msdu; + struct ieee80211_tx_info *info; + struct ath12k_skb_cb *skb_cb; + + spin_lock_bh(&ar->txmgmt_idr_lock); + msdu = idr_find(&ar->txmgmt_idr, desc_id); + + if (!msdu) { + ath12k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n", + desc_id); + spin_unlock_bh(&ar->txmgmt_idr_lock); + return -ENOENT; + } + + idr_remove(&ar->txmgmt_idr, desc_id); + spin_unlock_bh(&ar->txmgmt_idr_lock); + + skb_cb = ATH12K_SKB_CB(msdu); + dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); + + info = IEEE80211_SKB_CB(msdu); + if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status) + info->flags |= IEEE80211_TX_STAT_ACK; + + ieee80211_tx_status_irqsafe(ar->hw, msdu); + + /* WARN when we received this event without doing any mgmt tx */ + if (atomic_dec_if_positive(&ar->num_pending_mgmt_tx) < 0) + WARN_ON_ONCE(1); + + return 0; +} + +static int ath12k_pull_mgmt_tx_compl_param_tlv(struct ath12k_base *ab, + struct sk_buff *skb, + struct wmi_mgmt_tx_compl_event *param) +{ + const void **tb; + const struct wmi_mgmt_tx_compl_event *ev; + int ret; + + tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath12k_warn(ab, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT]; + if (!ev) { + ath12k_warn(ab, "failed to fetch mgmt tx compl ev"); + kfree(tb); + return -EPROTO; + } + + param->pdev_id = ev->pdev_id; + param->desc_id = ev->desc_id; + param->status = ev->status; + + kfree(tb); + return 0; +} + +static void ath12k_wmi_event_scan_started(struct ath12k *ar) +{ + lockdep_assert_held(&ar->data_lock); + + switch (ar->scan.state) { + case ATH12K_SCAN_IDLE: + case ATH12K_SCAN_RUNNING: + case ATH12K_SCAN_ABORTING: + ath12k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n", + ath12k_scan_state_str(ar->scan.state), + ar->scan.state); + break; + case ATH12K_SCAN_STARTING: + ar->scan.state = ATH12K_SCAN_RUNNING; + complete(&ar->scan.started); + break; + } +} + +static void ath12k_wmi_event_scan_start_failed(struct ath12k *ar) +{ + lockdep_assert_held(&ar->data_lock); + + switch (ar->scan.state) { + case ATH12K_SCAN_IDLE: + case ATH12K_SCAN_RUNNING: + case ATH12K_SCAN_ABORTING: + ath12k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n", + ath12k_scan_state_str(ar->scan.state), + ar->scan.state); + break; + case ATH12K_SCAN_STARTING: + complete(&ar->scan.started); + __ath12k_mac_scan_finish(ar); + break; + } +} + +static void ath12k_wmi_event_scan_completed(struct ath12k *ar) +{ + lockdep_assert_held(&ar->data_lock); + + switch 
(ar->scan.state) { + case ATH12K_SCAN_IDLE: + case ATH12K_SCAN_STARTING: + /* One suspected reason scan can be completed while starting is + * if firmware fails to deliver all scan events to the host, + * e.g. when transport pipe is full. This has been observed + * with spectral scan phyerr events starving wmi transport + * pipe. In such case the "scan completed" event should be (and + * is) ignored by the host as it may be just firmware's scan + * state machine recovering. + */ + ath12k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n", + ath12k_scan_state_str(ar->scan.state), + ar->scan.state); + break; + case ATH12K_SCAN_RUNNING: + case ATH12K_SCAN_ABORTING: + __ath12k_mac_scan_finish(ar); + break; + } +} + +static void ath12k_wmi_event_scan_bss_chan(struct ath12k *ar) +{ + lockdep_assert_held(&ar->data_lock); + + switch (ar->scan.state) { + case ATH12K_SCAN_IDLE: + case ATH12K_SCAN_STARTING: + ath12k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n", + ath12k_scan_state_str(ar->scan.state), + ar->scan.state); + break; + case ATH12K_SCAN_RUNNING: + case ATH12K_SCAN_ABORTING: + ar->scan_channel = NULL; + break; + } +} + +static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq) +{ + lockdep_assert_held(&ar->data_lock); + + switch (ar->scan.state) { + case ATH12K_SCAN_IDLE: + case ATH12K_SCAN_STARTING: + ath12k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n", + ath12k_scan_state_str(ar->scan.state), + ar->scan.state); + break; + case ATH12K_SCAN_RUNNING: + case ATH12K_SCAN_ABORTING: + ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq); + break; + } +} + +static const char * +ath12k_wmi_event_scan_type_str(enum wmi_scan_event_type type, + enum wmi_scan_completion_reason reason) +{ + switch (type) { + case WMI_SCAN_EVENT_STARTED: + return "started"; + case WMI_SCAN_EVENT_COMPLETED: + switch (reason) { + case WMI_SCAN_REASON_COMPLETED: + return "completed"; + case WMI_SCAN_REASON_CANCELLED: + return "completed [cancelled]"; + case WMI_SCAN_REASON_PREEMPTED: + return "completed [preempted]"; + case WMI_SCAN_REASON_TIMEDOUT: + return "completed [timedout]"; + case WMI_SCAN_REASON_INTERNAL_FAILURE: + return "completed [internal err]"; + case WMI_SCAN_REASON_MAX: + break; + } + return "completed [unknown]"; + case WMI_SCAN_EVENT_BSS_CHANNEL: + return "bss channel"; + case WMI_SCAN_EVENT_FOREIGN_CHAN: + return "foreign channel"; + case WMI_SCAN_EVENT_DEQUEUED: + return "dequeued"; + case WMI_SCAN_EVENT_PREEMPTED: + return "preempted"; + case WMI_SCAN_EVENT_START_FAILED: + return "start failed"; + case WMI_SCAN_EVENT_RESTARTED: + return "restarted"; + case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT: + return "foreign channel exit"; + default: + return "unknown"; + } +} + +static int ath12k_pull_scan_ev(struct ath12k_base *ab, struct sk_buff *skb, + struct wmi_scan_event *scan_evt_param) +{ + const void **tb; + const struct wmi_scan_event *ev; + int ret; + + tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath12k_warn(ab, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TAG_SCAN_EVENT]; + if (!ev) { + ath12k_warn(ab, "failed to fetch scan ev"); + kfree(tb); + return -EPROTO; + } + + scan_evt_param->event_type = ev->event_type; + scan_evt_param->reason = ev->reason; + scan_evt_param->channel_freq = ev->channel_freq; + scan_evt_param->scan_req_id = ev->scan_req_id; + scan_evt_param->scan_id = 
ev->scan_id; + scan_evt_param->vdev_id = ev->vdev_id; + scan_evt_param->tsf_timestamp = ev->tsf_timestamp; + + kfree(tb); + return 0; +} + +static int ath12k_pull_peer_sta_kickout_ev(struct ath12k_base *ab, struct sk_buff *skb, + struct wmi_peer_sta_kickout_arg *arg) +{ + const void **tb; + const struct wmi_peer_sta_kickout_event *ev; + int ret; + + tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath12k_warn(ab, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT]; + if (!ev) { + ath12k_warn(ab, "failed to fetch peer sta kickout ev"); + kfree(tb); + return -EPROTO; + } + + arg->mac_addr = ev->peer_macaddr.addr; + + kfree(tb); + return 0; +} + +static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb, + struct wmi_roam_event *roam_ev) +{ + const void **tb; + const struct wmi_roam_event *ev; + int ret; + + tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath12k_warn(ab, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TAG_ROAM_EVENT]; + if (!ev) { + ath12k_warn(ab, "failed to fetch roam ev"); + kfree(tb); + return -EPROTO; + } + + roam_ev->vdev_id = ev->vdev_id; + roam_ev->reason = ev->reason; + roam_ev->rssi = ev->rssi; + + kfree(tb); + return 0; +} + +static int freq_to_idx(struct ath12k *ar, int freq) +{ + struct ieee80211_supported_band *sband; + int band, ch, idx = 0; + + for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { + sband = ar->hw->wiphy->bands[band]; + if (!sband) + continue; + + for (ch = 0; ch < sband->n_channels; ch++, idx++) + if (sband->channels[ch].center_freq == freq) + goto exit; + } + +exit: + return idx; +} + +static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, u8 *evt_buf, + u32 len, struct wmi_chan_info_event *ch_info_ev) +{ + const void **tb; + const struct wmi_chan_info_event *ev; + int ret; + + tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath12k_warn(ab, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TAG_CHAN_INFO_EVENT]; + if (!ev) { + ath12k_warn(ab, "failed to fetch chan info ev"); + kfree(tb); + return -EPROTO; + } + + ch_info_ev->err_code = ev->err_code; + ch_info_ev->freq = ev->freq; + ch_info_ev->cmd_flags = ev->cmd_flags; + ch_info_ev->noise_floor = ev->noise_floor; + ch_info_ev->rx_clear_count = ev->rx_clear_count; + ch_info_ev->cycle_count = ev->cycle_count; + ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range; + ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp; + ch_info_ev->rx_frame_count = ev->rx_frame_count; + ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt; + ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz; + ch_info_ev->vdev_id = ev->vdev_id; + + kfree(tb); + return 0; +} + +static int +ath12k_pull_pdev_bss_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb, + struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev) +{ + const void **tb; + const struct wmi_pdev_bss_chan_info_event *ev; + int ret; + + tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath12k_warn(ab, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT]; + if (!ev) { + ath12k_warn(ab, "failed to fetch pdev bss chan info ev"); + kfree(tb); + return -EPROTO; + } + + bss_ch_info_ev->pdev_id = ev->pdev_id; + bss_ch_info_ev->freq = ev->freq; + bss_ch_info_ev->noise_floor = 
ev->noise_floor; + bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low; + bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high; + bss_ch_info_ev->cycle_count_low = ev->cycle_count_low; + bss_ch_info_ev->cycle_count_high = ev->cycle_count_high; + bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low; + bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high; + bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low; + bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high; + bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low; + bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high; + + kfree(tb); + return 0; +} + +static int +ath12k_pull_vdev_install_key_compl_ev(struct ath12k_base *ab, struct sk_buff *skb, + struct wmi_vdev_install_key_complete_arg *arg) +{ + const void **tb; + const struct wmi_vdev_install_key_compl_event *ev; + int ret; + + tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath12k_warn(ab, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT]; + if (!ev) { + ath12k_warn(ab, "failed to fetch vdev install key compl ev"); + kfree(tb); + return -EPROTO; + } + + arg->vdev_id = le32_to_cpu(ev->vdev_id); + arg->macaddr = ev->peer_macaddr.addr; + arg->key_idx = le32_to_cpu(ev->key_idx); + arg->key_flags = le32_to_cpu(ev->key_flags); + arg->status = le32_to_cpu(ev->status); + + kfree(tb); + return 0; +} + +static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff *skb, + struct wmi_peer_assoc_conf_arg *peer_assoc_conf) +{ + const void **tb; + const struct wmi_peer_assoc_conf_event *ev; + int ret; + + tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath12k_warn(ab, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT]; + if (!ev) { + ath12k_warn(ab, "failed to fetch peer assoc conf ev"); + kfree(tb); + return -EPROTO; + } + + peer_assoc_conf->vdev_id = le32_to_cpu(ev->vdev_id); + peer_assoc_conf->macaddr = ev->peer_macaddr.addr; + + kfree(tb); + return 0; +} + +static int +ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, u8 *evt_buf, + u32 len, struct wmi_pdev_temperature_event *temp_ev) +{ + const void **tb; + const struct wmi_pdev_temperature_event *ev; + int ret; + + tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath12k_warn(ab, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT]; + if (!ev) { + ath12k_warn(ab, "failed to fetch pdev temp ev"); + kfree(tb); + return -EPROTO; + } + + /* copy the parsed fields out; otherwise the caller's event stays zeroed */ + *temp_ev = *ev; + + kfree(tb); + return 0; +} + +static void ath12k_wmi_op_ep_tx_credits(struct ath12k_base *ab) +{ + /* try to send pending beacons first. 
they take priority */ + wake_up(&ab->wmi_ab.tx_credits_wq); +} + +static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab, + struct sk_buff *skb) +{ + dev_kfree_skb(skb); +} + +static bool ath12k_reg_is_world_alpha(char *alpha) +{ + return alpha[0] == '0' && alpha[1] == '0'; +} + +static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb) +{ + struct ath12k_reg_info *reg_info = NULL; + struct ieee80211_regdomain *regd = NULL; + bool intersect = false; + int ret = 0, pdev_idx, i, j; + struct ath12k *ar; + + reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC); + if (!reg_info) { + ret = -ENOMEM; + goto fallback; + } + + ret = ath12k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info); + + if (ret) { + ath12k_warn(ab, "failed to extract regulatory info from received event\n"); + goto fallback; + } + + if (reg_info->status_code != REG_SET_CC_STATUS_PASS) { + /* In case of failure to set the requested ctry, + * fw retains the current regd. We log the failure + * and return from here. + */ + ath12k_warn(ab, "Failed to set the requested Country regulatory setting\n"); + goto mem_free; + } + + pdev_idx = reg_info->phy_id; + + if (pdev_idx >= ab->num_radios) { + /* Process the event for phy0 only if single_pdev_only + * is true. If pdev_idx is valid but not 0, discard the + * event. Otherwise, it goes to fallback. + */ + if (ab->hw_params->single_pdev_only && + pdev_idx < ab->hw_params->num_rxmda_per_pdev) + goto mem_free; + else + goto fallback; + } + + /* Avoid multiple overwrites to default regd during core + * stop-start after mac registration. + */ + if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] && + !memcmp(ab->default_regd[pdev_idx]->alpha2, + reg_info->alpha2, 2)) + goto mem_free; + + /* Intersect new rules with default regd if a new country setting was + * requested, i.e. a default regd was already set during initialization + * and the regd coming from this event has valid country info. + */ + if (ab->default_regd[pdev_idx] && + !ath12k_reg_is_world_alpha((char *) + ab->default_regd[pdev_idx]->alpha2) && + !ath12k_reg_is_world_alpha((char *)reg_info->alpha2)) + intersect = true; + + regd = ath12k_reg_build_regd(ab, reg_info, intersect); + if (!regd) { + ath12k_warn(ab, "failed to build regd from reg_info\n"); + goto fallback; + } + + spin_lock(&ab->base_lock); + if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) { + /* Once mac is registered, ar is valid and all CC events from + * fw are currently considered to be received due to user + * requests. + * Free the previously built regd before assigning the newly + * generated regd to ar. NULL pointer handling is taken + * care of by kfree itself. + */ + ar = ab->pdevs[pdev_idx].ar; + kfree(ab->new_regd[pdev_idx]); + ab->new_regd[pdev_idx] = regd; + ieee80211_queue_work(ar->hw, &ar->regd_update_work); + } else { + /* Multiple events for the same *ar are not expected. But we + * can still clear any previously stored default_regd if we + * are receiving this event for the same radio by mistake. + * NULL pointer handling is taken care of by kfree itself. + */ + kfree(ab->default_regd[pdev_idx]); + /* This regd would be applied during mac registration */ + ab->default_regd[pdev_idx] = regd; + } + ab->dfs_region = reg_info->dfs_region; + spin_unlock(&ab->base_lock); + + goto mem_free; + +fallback: + /* Fallback to older reg (by sending previous country setting + * again) if fw has succeeded and we failed to process here. + * The Regdomain should be uniform across driver and fw. 
Since the + * FW has processed the command and sent a success status, we expect + * this function to succeed as well. If it doesn't, CTRY needs to be + * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent. + */ + /* TODO: This is rare, but still should also be handled */ + WARN_ON(1); +mem_free: + if (reg_info) { + kfree(reg_info->reg_rules_2g_ptr); + kfree(reg_info->reg_rules_5g_ptr); + if (reg_info->is_ext_reg_event) { + for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) + kfree(reg_info->reg_rules_6g_ap_ptr[i]); + + for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) + for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) + kfree(reg_info->reg_rules_6g_client_ptr[j][i]); + } + kfree(reg_info); + } + return ret; +} + +static int ath12k_wmi_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len, + const void *ptr, void *data) +{ + struct ath12k_wmi_rdy_parse *rdy_parse = data; + struct wmi_ready_event fixed_param; + struct ath12k_wmi_mac_addr_params *addr_list; + struct ath12k_pdev *pdev; + u32 num_mac_addr; + int i; + + switch (tag) { + case WMI_TAG_READY_EVENT: + memset(&fixed_param, 0, sizeof(fixed_param)); + memcpy(&fixed_param, (struct wmi_ready_event *)ptr, + min_t(u16, sizeof(fixed_param), len)); + ab->wlan_init_status = le32_to_cpu(fixed_param.ready_event_min.status); + rdy_parse->num_extra_mac_addr = + le32_to_cpu(fixed_param.ready_event_min.num_extra_mac_addr); + + ether_addr_copy(ab->mac_addr, + fixed_param.ready_event_min.mac_addr.addr); + ab->pktlog_defs_checksum = le32_to_cpu(fixed_param.pktlog_defs_checksum); + ab->wmi_ready = true; + break; + case WMI_TAG_ARRAY_FIXED_STRUCT: + addr_list = (struct ath12k_wmi_mac_addr_params *)ptr; + num_mac_addr = rdy_parse->num_extra_mac_addr; + + if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios)) + break; + + for (i = 0; i < ab->num_radios; i++) { + pdev = &ab->pdevs[i]; + ether_addr_copy(pdev->mac_addr, addr_list[i].addr); + } + ab->pdevs_macaddr_valid = true; + break; + default: + break; + } + + return 0; +} + +static int ath12k_ready_event(struct ath12k_base *ab, struct sk_buff *skb) +{ + struct ath12k_wmi_rdy_parse rdy_parse = { }; + int ret; + + ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, + ath12k_wmi_rdy_parse, &rdy_parse); + if (ret) { + ath12k_warn(ab, "failed to parse tlv %d\n", ret); + return ret; + } + + complete(&ab->wmi_ab.unified_ready); + return 0; +} + +static void ath12k_peer_delete_resp_event(struct ath12k_base *ab, struct sk_buff *skb) +{ + struct wmi_peer_delete_resp_event peer_del_resp; + struct ath12k *ar; + + if (ath12k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) { + ath12k_warn(ab, "failed to extract peer delete resp"); + return; + } + + rcu_read_lock(); + ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(peer_del_resp.vdev_id)); + if (!ar) { + ath12k_warn(ab, "invalid vdev id in peer delete resp ev %d", + peer_del_resp.vdev_id); + rcu_read_unlock(); + return; + } + + complete(&ar->peer_delete_done); + rcu_read_unlock(); + ath12k_dbg(ab, ATH12K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n", + peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr); +} + +static void ath12k_vdev_delete_resp_event(struct ath12k_base *ab, + struct sk_buff *skb) +{ + struct ath12k *ar; + u32 vdev_id = 0; + + if (ath12k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) { + ath12k_warn(ab, "failed to extract vdev delete resp"); + return; + } + + rcu_read_lock(); + ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id); + if (!ar) { + ath12k_warn(ab, "invalid vdev id in vdev delete resp ev %d", + vdev_id); + 
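All of these event handlers funnel through the same TLV pull idiom: parse the event buffer into a tag-indexed table with ath12k_wmi_tlv_parse_alloc(), look up the mandatory tag, copy the fields out, then free the table. A minimal sketch of that shape, using a hypothetical wmi_foo_event / WMI_TAG_FOO_EVENT pair (both illustrative names, not defined by this patch):

static int ath12k_pull_foo_ev(struct ath12k_base *ab, struct sk_buff *skb,
			      u32 *vdev_id)
{
	const void **tb;
	const struct wmi_foo_event *ev;	/* hypothetical event struct */

	/* build the tag-indexed TLV table from the raw event buffer */
	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb))
		return PTR_ERR(tb);

	ev = tb[WMI_TAG_FOO_EVENT];	/* hypothetical tag */
	if (!ev) {
		/* firmware omitted the mandatory TLV */
		kfree(tb);
		return -EPROTO;
	}

	*vdev_id = le32_to_cpu(ev->vdev_id);

	kfree(tb);
	return 0;
}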
rcu_read_unlock(); + return; + } + + complete(&ar->vdev_delete_done); + + rcu_read_unlock(); + + ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev delete resp for vdev id %d\n", + vdev_id); +} + +static const char *ath12k_wmi_vdev_resp_print(u32 vdev_resp_status) +{ + switch (vdev_resp_status) { + case WMI_VDEV_START_RESPONSE_INVALID_VDEVID: + return "invalid vdev id"; + case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED: + return "not supported"; + case WMI_VDEV_START_RESPONSE_DFS_VIOLATION: + return "dfs violation"; + case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN: + return "invalid regdomain"; + default: + return "unknown"; + } +} + +static void ath12k_vdev_start_resp_event(struct ath12k_base *ab, struct sk_buff *skb) +{ + struct wmi_vdev_start_resp_event vdev_start_resp; + struct ath12k *ar; + u32 status; + + if (ath12k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) { + ath12k_warn(ab, "failed to extract vdev start resp"); + return; + } + + rcu_read_lock(); + ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(vdev_start_resp.vdev_id)); + if (!ar) { + ath12k_warn(ab, "invalid vdev id in vdev start resp ev %d", + vdev_start_resp.vdev_id); + rcu_read_unlock(); + return; + } + + ar->last_wmi_vdev_start_status = 0; + + status = le32_to_cpu(vdev_start_resp.status); + + if (WARN_ON_ONCE(status)) { + ath12k_warn(ab, "vdev start resp error status %d (%s)\n", + status, ath12k_wmi_vdev_resp_print(status)); + ar->last_wmi_vdev_start_status = status; + } + + complete(&ar->vdev_setup_done); + + rcu_read_unlock(); + + ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev start resp for vdev id %d", + vdev_start_resp.vdev_id); +} + +static void ath12k_bcn_tx_status_event(struct ath12k_base *ab, struct sk_buff *skb) +{ + u32 vdev_id, tx_status; + + if (ath12k_pull_bcn_tx_status_ev(ab, skb->data, skb->len, + &vdev_id, &tx_status) != 0) { + ath12k_warn(ab, "failed to extract bcn tx status"); + return; + } +} + +static void ath12k_vdev_stopped_event(struct ath12k_base *ab, struct sk_buff *skb) +{ + struct ath12k *ar; + u32 vdev_id = 0; + + if (ath12k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) { + ath12k_warn(ab, "failed to extract vdev stopped event"); + return; + } + + rcu_read_lock(); + ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id); + if (!ar) { + ath12k_warn(ab, "invalid vdev id in vdev stopped ev %d", + vdev_id); + rcu_read_unlock(); + return; + } + + complete(&ar->vdev_setup_done); + + rcu_read_unlock(); + + ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id); +} + +static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb) +{ + struct ath12k_wmi_mgmt_rx_arg rx_ev = {0}; + struct ath12k *ar; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_hdr *hdr; + u16 fc; + struct ieee80211_supported_band *sband; + + if (ath12k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) { + ath12k_warn(ab, "failed to extract mgmt rx event"); + dev_kfree_skb(skb); + return; + } + + memset(status, 0, sizeof(*status)); + + ath12k_dbg(ab, ATH12K_DBG_MGMT, "mgmt rx event status %08x\n", + rx_ev.status); + + rcu_read_lock(); + ar = ath12k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id); + + if (!ar) { + ath12k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n", + rx_ev.pdev_id); + dev_kfree_skb(skb); + goto exit; + } + + if ((test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) || + (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT | + WMI_RX_STATUS_ERR_KEY_CACHE_MISS | + WMI_RX_STATUS_ERR_CRC))) { + dev_kfree_skb(skb); + goto exit; + } + + if (rx_ev.status & WMI_RX_STATUS_ERR_MIC) + status->flag |= 
RX_FLAG_MMIC_ERROR; + + if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ) { + status->band = NL80211_BAND_6GHZ; + } else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) { + status->band = NL80211_BAND_2GHZ; + } else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5G_CHAN) { + status->band = NL80211_BAND_5GHZ; + } else { + /* Shouldn't happen unless list of advertised channels to + * mac80211 has been changed. + */ + WARN_ON_ONCE(1); + dev_kfree_skb(skb); + goto exit; + } + + if (rx_ev.phy_mode == MODE_11B && + (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ)) + ath12k_dbg(ab, ATH12K_DBG_WMI, + "wmi mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band); + + sband = &ar->mac.sbands[status->band]; + + status->freq = ieee80211_channel_to_frequency(rx_ev.channel, + status->band); + status->signal = rx_ev.snr + ATH12K_DEFAULT_NOISE_FLOOR; + status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100); + + hdr = (struct ieee80211_hdr *)skb->data; + fc = le16_to_cpu(hdr->frame_control); + + /* Firmware is guaranteed to report all essential management frames via + * WMI while it can deliver some extra via HTT. Since there can be + * duplicates split the reporting wrt monitor/sniffing. + */ + status->flag |= RX_FLAG_SKIP_MONITOR; + + /* In case of PMF, FW delivers decrypted frames with Protected Bit set + * including group privacy action frames. + */ + if (ieee80211_has_protected(hdr->frame_control)) { + status->flag |= RX_FLAG_DECRYPTED; + + if (!ieee80211_is_robust_mgmt_frame(skb)) { + status->flag |= RX_FLAG_IV_STRIPPED | + RX_FLAG_MMIC_STRIPPED; + hdr->frame_control = __cpu_to_le16(fc & + ~IEEE80211_FCTL_PROTECTED); + } + } + + /* TODO: Pending handle beacon implementation + *if (ieee80211_is_beacon(hdr->frame_control)) + * ath12k_mac_handle_beacon(ar, skb); + */ + + ath12k_dbg(ab, ATH12K_DBG_MGMT, + "event mgmt rx skb %pK len %d ftype %02x stype %02x\n", + skb, skb->len, + fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE); + + ath12k_dbg(ab, ATH12K_DBG_MGMT, + "event mgmt rx freq %d band %d snr %d, rate_idx %d\n", + status->freq, status->band, status->signal, + status->rate_idx); + + ieee80211_rx_ni(ar->hw, skb); + +exit: + rcu_read_unlock(); +} + +static void ath12k_mgmt_tx_compl_event(struct ath12k_base *ab, struct sk_buff *skb) +{ + struct wmi_mgmt_tx_compl_event tx_compl_param = {0}; + struct ath12k *ar; + + if (ath12k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) { + ath12k_warn(ab, "failed to extract mgmt tx compl event"); + return; + } + + rcu_read_lock(); + ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(tx_compl_param.pdev_id)); + if (!ar) { + ath12k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n", + tx_compl_param.pdev_id); + goto exit; + } + + wmi_process_mgmt_tx_comp(ar, le32_to_cpu(tx_compl_param.desc_id), + le32_to_cpu(tx_compl_param.status)); + + ath12k_dbg(ab, ATH12K_DBG_MGMT, + "mgmt tx compl ev pdev_id %d, desc_id %d, status %d", + tx_compl_param.pdev_id, tx_compl_param.desc_id, + tx_compl_param.status); + +exit: + rcu_read_unlock(); +} + +static struct ath12k *ath12k_get_ar_on_scan_abort(struct ath12k_base *ab, + u32 vdev_id) +{ + int i; + struct ath12k_pdev *pdev; + struct ath12k *ar; + + for (i = 0; i < ab->num_radios; i++) { + pdev = rcu_dereference(ab->pdevs_active[i]); + if (pdev && pdev->ar) { + ar = pdev->ar; + + spin_lock_bh(&ar->data_lock); + if (ar->scan.state == ATH12K_SCAN_ABORTING && + ar->scan.vdev_id == vdev_id) { + spin_unlock_bh(&ar->data_lock); + return ar; + } + spin_unlock_bh(&ar->data_lock); + 
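wmi_process_mgmt_tx_comp() above resolves desc_id back to the queued skb through ar->txmgmt_idr; the allocation side of that mapping lives in the tx path, outside this hunk. A sketch of the counterpart, where the helper name and the ATH12K_TX_MGMT_NUM_PENDING_MAX bound are assumptions mirroring the ath11k pattern:

static int ath12k_mgmt_tx_desc_alloc(struct ath12k *ar, struct sk_buff *skb)
{
	int desc_id;

	/* hand out a small integer id that firmware echoes back in
	 * WMI_MGMT_TX_COMPLETION_EVENTID
	 */
	spin_lock_bh(&ar->txmgmt_idr_lock);
	desc_id = idr_alloc(&ar->txmgmt_idr, skb, 0,
			    ATH12K_TX_MGMT_NUM_PENDING_MAX, GFP_ATOMIC);
	spin_unlock_bh(&ar->txmgmt_idr_lock);

	return desc_id;	/* >= 0 on success, negative errno on failure */
}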
} + } + return NULL; +} + +static void ath12k_scan_event(struct ath12k_base *ab, struct sk_buff *skb) +{ + struct ath12k *ar; + struct wmi_scan_event scan_ev = {0}; + + if (ath12k_pull_scan_ev(ab, skb, &scan_ev) != 0) { + ath12k_warn(ab, "failed to extract scan event"); + return; + } + + rcu_read_lock(); + + /* In case the scan was cancelled, ex. during interface teardown, + * the interface will not be found in active interfaces. + * Rather, in such scenarios, iterate over the active pdev's to + * search 'ar' if the corresponding 'ar' scan is ABORTING and the + * aborting scan's vdev id matches this event info. + */ + if (le32_to_cpu(scan_ev.event_type) == WMI_SCAN_EVENT_COMPLETED && + le32_to_cpu(scan_ev.reason) == WMI_SCAN_REASON_CANCELLED) + ar = ath12k_get_ar_on_scan_abort(ab, le32_to_cpu(scan_ev.vdev_id)); + else + ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(scan_ev.vdev_id)); + + if (!ar) { + ath12k_warn(ab, "Received scan event for unknown vdev"); + rcu_read_unlock(); + return; + } + + spin_lock_bh(&ar->data_lock); + + ath12k_dbg(ab, ATH12K_DBG_WMI, + "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n", + ath12k_wmi_event_scan_type_str(le32_to_cpu(scan_ev.event_type), + le32_to_cpu(scan_ev.reason)), + le32_to_cpu(scan_ev.event_type), + le32_to_cpu(scan_ev.reason), + le32_to_cpu(scan_ev.channel_freq), + le32_to_cpu(scan_ev.scan_req_id), + le32_to_cpu(scan_ev.scan_id), + le32_to_cpu(scan_ev.vdev_id), + ath12k_scan_state_str(ar->scan.state), ar->scan.state); + + switch (le32_to_cpu(scan_ev.event_type)) { + case WMI_SCAN_EVENT_STARTED: + ath12k_wmi_event_scan_started(ar); + break; + case WMI_SCAN_EVENT_COMPLETED: + ath12k_wmi_event_scan_completed(ar); + break; + case WMI_SCAN_EVENT_BSS_CHANNEL: + ath12k_wmi_event_scan_bss_chan(ar); + break; + case WMI_SCAN_EVENT_FOREIGN_CHAN: + ath12k_wmi_event_scan_foreign_chan(ar, le32_to_cpu(scan_ev.channel_freq)); + break; + case WMI_SCAN_EVENT_START_FAILED: + ath12k_warn(ab, "received scan start failure event\n"); + ath12k_wmi_event_scan_start_failed(ar); + break; + case WMI_SCAN_EVENT_DEQUEUED: + case WMI_SCAN_EVENT_PREEMPTED: + case WMI_SCAN_EVENT_RESTARTED: + case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT: + default: + break; + } + + spin_unlock_bh(&ar->data_lock); + + rcu_read_unlock(); +} + +static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff *skb) +{ + struct wmi_peer_sta_kickout_arg arg = {}; + struct ieee80211_sta *sta; + struct ath12k_peer *peer; + struct ath12k *ar; + + if (ath12k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) { + ath12k_warn(ab, "failed to extract peer sta kickout event"); + return; + } + + rcu_read_lock(); + + spin_lock_bh(&ab->base_lock); + + peer = ath12k_peer_find_by_addr(ab, arg.mac_addr); + + if (!peer) { + ath12k_warn(ab, "peer not found %pM\n", + arg.mac_addr); + goto exit; + } + + ar = ath12k_mac_get_ar_by_vdev_id(ab, peer->vdev_id); + if (!ar) { + ath12k_warn(ab, "invalid vdev id in peer sta kickout ev %d", + peer->vdev_id); + goto exit; + } + + sta = ieee80211_find_sta_by_ifaddr(ar->hw, + arg.mac_addr, NULL); + if (!sta) { + ath12k_warn(ab, "Spurious quick kickout for STA %pM\n", + arg.mac_addr); + goto exit; + } + + ath12k_dbg(ab, ATH12K_DBG_WMI, "peer sta kickout event %pM", + arg.mac_addr); + + ieee80211_report_low_ack(sta, 10); + +exit: + spin_unlock_bh(&ab->base_lock); + rcu_read_unlock(); +} + +static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb) +{ + struct wmi_roam_event roam_ev = {}; + struct ath12k *ar; + + if 
(ath12k_pull_roam_ev(ab, skb, &roam_ev) != 0) { + ath12k_warn(ab, "failed to extract roam event"); + return; + } + + ath12k_dbg(ab, ATH12K_DBG_WMI, + "wmi roam event vdev %u reason 0x%08x rssi %d\n", + roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi); + + rcu_read_lock(); + ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(roam_ev.vdev_id)); + if (!ar) { + ath12k_warn(ab, "invalid vdev id in roam ev %d", + roam_ev.vdev_id); + rcu_read_unlock(); + return; + } + + if (le32_to_cpu(roam_ev.reason) >= WMI_ROAM_REASON_MAX) + ath12k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n", + roam_ev.reason, roam_ev.vdev_id); + + switch (le32_to_cpu(roam_ev.reason)) { + case WMI_ROAM_REASON_BEACON_MISS: + /* TODO: Pending beacon miss and connection_loss_work + * implementation + * ath12k_mac_handle_beacon_miss(ar, vdev_id); + */ + break; + case WMI_ROAM_REASON_BETTER_AP: + case WMI_ROAM_REASON_LOW_RSSI: + case WMI_ROAM_REASON_SUITABLE_AP_FOUND: + case WMI_ROAM_REASON_HO_FAILED: + ath12k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n", + roam_ev.reason, roam_ev.vdev_id); + break; + } + + rcu_read_unlock(); +} + +static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb) +{ + struct wmi_chan_info_event ch_info_ev = {0}; + struct ath12k *ar; + struct survey_info *survey; + int idx; + /* HW channel counters frequency value in hertz */ + u32 cc_freq_hz = ab->cc_freq_hz; + + if (ath12k_pull_chan_info_ev(ab, skb->data, skb->len, &ch_info_ev) != 0) { + ath12k_warn(ab, "failed to extract chan info event"); + return; + } + + ath12k_dbg(ab, ATH12K_DBG_WMI, + "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n", + ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq, + ch_info_ev.cmd_flags, ch_info_ev.noise_floor, + ch_info_ev.rx_clear_count, ch_info_ev.cycle_count, + ch_info_ev.mac_clk_mhz); + + if (le32_to_cpu(ch_info_ev.cmd_flags) == WMI_CHAN_INFO_END_RESP) { + ath12k_dbg(ab, ATH12K_DBG_WMI, "chan info report completed\n"); + return; + } + + rcu_read_lock(); + ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(ch_info_ev.vdev_id)); + if (!ar) { + ath12k_warn(ab, "invalid vdev id in chan info ev %d", + ch_info_ev.vdev_id); + rcu_read_unlock(); + return; + } + spin_lock_bh(&ar->data_lock); + + switch (ar->scan.state) { + case ATH12K_SCAN_IDLE: + case ATH12K_SCAN_STARTING: + ath12k_warn(ab, "received chan info event without a scan request, ignoring\n"); + goto exit; + case ATH12K_SCAN_RUNNING: + case ATH12K_SCAN_ABORTING: + break; + } + + idx = freq_to_idx(ar, le32_to_cpu(ch_info_ev.freq)); + if (idx >= ARRAY_SIZE(ar->survey)) { + ath12k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n", + ch_info_ev.freq, idx); + goto exit; + } + + /* If FW provides MAC clock frequency in Mhz, overriding the initialized + * HW channel counters frequency value + */ + if (ch_info_ev.mac_clk_mhz) + cc_freq_hz = (le32_to_cpu(ch_info_ev.mac_clk_mhz) * 1000); + + if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) { + survey = &ar->survey[idx]; + memset(survey, 0, sizeof(*survey)); + survey->noise = le32_to_cpu(ch_info_ev.noise_floor); + survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME | + SURVEY_INFO_TIME_BUSY; + survey->time = div_u64(le32_to_cpu(ch_info_ev.cycle_count), cc_freq_hz); + survey->time_busy = div_u64(le32_to_cpu(ch_info_ev.rx_clear_count), + cc_freq_hz); + } +exit: + spin_unlock_bh(&ar->data_lock); + rcu_read_unlock(); +} + +static void 
+ath12k_pdev_bss_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb) +{ + struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {}; + struct survey_info *survey; + struct ath12k *ar; + u32 cc_freq_hz = ab->cc_freq_hz; + u64 busy, total, tx, rx, rx_bss; + int idx; + + if (ath12k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) { + ath12k_warn(ab, "failed to extract pdev bss chan info event"); + return; + } + + busy = (u64)(le32_to_cpu(bss_ch_info_ev.rx_clear_count_high)) << 32 | + le32_to_cpu(bss_ch_info_ev.rx_clear_count_low); + + total = (u64)(le32_to_cpu(bss_ch_info_ev.cycle_count_high)) << 32 | + le32_to_cpu(bss_ch_info_ev.cycle_count_low); + + tx = (u64)(le32_to_cpu(bss_ch_info_ev.tx_cycle_count_high)) << 32 | + le32_to_cpu(bss_ch_info_ev.tx_cycle_count_low); + + rx = (u64)(le32_to_cpu(bss_ch_info_ev.rx_cycle_count_high)) << 32 | + le32_to_cpu(bss_ch_info_ev.rx_cycle_count_low); + + rx_bss = (u64)(le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_high)) << 32 | + le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_low); + + ath12k_dbg(ab, ATH12K_DBG_WMI, + "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n", + bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq, + bss_ch_info_ev.noise_floor, busy, total, + tx, rx, rx_bss); + + rcu_read_lock(); + ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(bss_ch_info_ev.pdev_id)); + + if (!ar) { + ath12k_warn(ab, "invalid pdev id %d in bss_chan_info event\n", + bss_ch_info_ev.pdev_id); + rcu_read_unlock(); + return; + } + + spin_lock_bh(&ar->data_lock); + idx = freq_to_idx(ar, le32_to_cpu(bss_ch_info_ev.freq)); + if (idx >= ARRAY_SIZE(ar->survey)) { + ath12k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n", + bss_ch_info_ev.freq, idx); + goto exit; + } + + survey = &ar->survey[idx]; + + survey->noise = le32_to_cpu(bss_ch_info_ev.noise_floor); + survey->time = div_u64(total, cc_freq_hz); + survey->time_busy = div_u64(busy, cc_freq_hz); + survey->time_rx = div_u64(rx_bss, cc_freq_hz); + survey->time_tx = div_u64(tx, cc_freq_hz); + survey->filled |= (SURVEY_INFO_NOISE_DBM | + SURVEY_INFO_TIME | + SURVEY_INFO_TIME_BUSY | + SURVEY_INFO_TIME_RX | + SURVEY_INFO_TIME_TX); +exit: + spin_unlock_bh(&ar->data_lock); + complete(&ar->bss_survey_done); + + rcu_read_unlock(); +} + +static void ath12k_vdev_install_key_compl_event(struct ath12k_base *ab, + struct sk_buff *skb) +{ + struct wmi_vdev_install_key_complete_arg install_key_compl = {0}; + struct ath12k *ar; + + if (ath12k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) { + ath12k_warn(ab, "failed to extract install key compl event"); + return; + } + + ath12k_dbg(ab, ATH12K_DBG_WMI, + "vdev install key ev idx %d flags %08x macaddr %pM status %d\n", + install_key_compl.key_idx, install_key_compl.key_flags, + install_key_compl.macaddr, install_key_compl.status); + + rcu_read_lock(); + ar = ath12k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id); + if (!ar) { + ath12k_warn(ab, "invalid vdev id in install key compl ev %d", + install_key_compl.vdev_id); + rcu_read_unlock(); + return; + } + + ar->install_key_status = 0; + + if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) { + ath12k_warn(ab, "install key failed for %pM status %d\n", + install_key_compl.macaddr, install_key_compl.status); + ar->install_key_status = install_key_compl.status; + } + + complete(&ar->install_key_done); + rcu_read_unlock(); +} + +static void ath12k_service_available_event(struct ath12k_base *ab, struct 
sk_buff *skb) +{ + const void **tb; + const struct wmi_service_available_event *ev; + int ret; + int i, j; + + tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath12k_warn(ab, "failed to parse tlv: %d\n", ret); + return; + } + + ev = tb[WMI_TAG_SERVICE_AVAILABLE_EVENT]; + if (!ev) { + ath12k_warn(ab, "failed to fetch svc available ev"); + kfree(tb); + return; + } + + /* TODO: Use wmi_service_segment_offset information to get the service + * especially when more services are advertised in multiple service + * available events. + */ + for (i = 0, j = WMI_MAX_SERVICE; + i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE; + i++) { + do { + if (le32_to_cpu(ev->wmi_service_segment_bitmap[i]) & + BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32)) + set_bit(j, ab->wmi_ab.svc_map); + } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32); + } + + ath12k_dbg(ab, ATH12K_DBG_WMI, + "wmi_ext_service_bitmap 0:0x%x, 1:0x%x, 2:0x%x, 3:0x%x", + ev->wmi_service_segment_bitmap[0], ev->wmi_service_segment_bitmap[1], + ev->wmi_service_segment_bitmap[2], ev->wmi_service_segment_bitmap[3]); + + kfree(tb); +} + +static void ath12k_peer_assoc_conf_event(struct ath12k_base *ab, struct sk_buff *skb) +{ + struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0}; + struct ath12k *ar; + + if (ath12k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) { + ath12k_warn(ab, "failed to extract peer assoc conf event"); + return; + } + + ath12k_dbg(ab, ATH12K_DBG_WMI, + "peer assoc conf ev vdev id %d macaddr %pM\n", + peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr); + + rcu_read_lock(); + ar = ath12k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id); + + if (!ar) { + ath12k_warn(ab, "invalid vdev id in peer assoc conf ev %d", + peer_assoc_conf.vdev_id); + rcu_read_unlock(); + return; + } + + complete(&ar->peer_assoc_done); + rcu_read_unlock(); +} + +static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb) +{ +} + +/* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned + * is not part of BDF CTL (Conformance test limits) table entries. + */ +static void ath12k_pdev_ctl_failsafe_check_event(struct ath12k_base *ab, + struct sk_buff *skb) +{ + const void **tb; + const struct wmi_pdev_ctl_failsafe_chk_event *ev; + int ret; + + tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath12k_warn(ab, "failed to parse tlv: %d\n", ret); + return; + } + + ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT]; + if (!ev) { + ath12k_warn(ab, "failed to fetch pdev ctl failsafe check ev"); + kfree(tb); + return; + } + + ath12k_dbg(ab, ATH12K_DBG_WMI, + "pdev ctl failsafe check ev status %d\n", + ev->ctl_failsafe_status); + + /* If ctl_failsafe_status is set to 1, FW will max out the transmit power + * to 10 dBm; else the CTL power entry in the BDF would be picked up. 
+ */ + if (ev->ctl_failsafe_status != 0) + ath12k_warn(ab, "pdev ctl failsafe failure status %d", + ev->ctl_failsafe_status); + + kfree(tb); +} + +static void +ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab, + const struct ath12k_wmi_pdev_csa_event *ev, + const u32 *vdev_ids) +{ + int i; + struct ath12k_vif *arvif; + + /* Finish CSA once the switch count reaches zero */ + if (ev->current_switch_count) + return; + + rcu_read_lock(); + for (i = 0; i < le32_to_cpu(ev->num_vdevs); i++) { + arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]); + + if (!arvif) { + ath12k_warn(ab, "Received csa status for unknown vdev %d", + vdev_ids[i]); + continue; + } + + if (arvif->is_up && arvif->vif->bss_conf.csa_active) + ieee80211_csa_finish(arvif->vif); + } + rcu_read_unlock(); +} + +static void +ath12k_wmi_pdev_csa_switch_count_status_event(struct ath12k_base *ab, + struct sk_buff *skb) +{ + const void **tb; + const struct ath12k_wmi_pdev_csa_event *ev; + const u32 *vdev_ids; + int ret; + + tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath12k_warn(ab, "failed to parse tlv: %d\n", ret); + return; + } + + ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT]; + vdev_ids = tb[WMI_TAG_ARRAY_UINT32]; + + if (!ev || !vdev_ids) { + ath12k_warn(ab, "failed to fetch pdev csa switch count ev"); + kfree(tb); + return; + } + + ath12k_dbg(ab, ATH12K_DBG_WMI, + "pdev csa switch count %d for pdev %d, num_vdevs %d", + ev->current_switch_count, ev->pdev_id, + ev->num_vdevs); + + ath12k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids); + + kfree(tb); +} + +static void +ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff *skb) +{ + const void **tb; + const struct ath12k_wmi_pdev_radar_event *ev; + struct ath12k *ar; + int ret; + + tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath12k_warn(ab, "failed to parse tlv: %d\n", ret); + return; + } + + ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT]; + + if (!ev) { + ath12k_warn(ab, "failed to fetch pdev dfs radar detected ev"); + kfree(tb); + return; + } + + ath12k_dbg(ab, ATH12K_DBG_WMI, + "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d", + ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width, + ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp, + ev->freq_offset, ev->sidx); + + ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id)); + + if (!ar) { + ath12k_warn(ab, "radar detected in invalid pdev %d\n", + ev->pdev_id); + goto exit; + } + + ath12k_dbg(ar->ab, ATH12K_DBG_REG, "DFS Radar Detected in pdev %d\n", + ev->pdev_id); + + if (ar->dfs_block_radar_events) + ath12k_info(ab, "DFS Radar detected, but ignored as requested\n"); + else + ieee80211_radar_detected(ar->hw); + +exit: + kfree(tb); +} + +static void +ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab, + struct sk_buff *skb) +{ + struct ath12k *ar; + struct wmi_pdev_temperature_event ev = {0}; + + if (ath12k_pull_pdev_temp_ev(ab, skb->data, skb->len, &ev) != 0) { + ath12k_warn(ab, "failed to extract pdev temperature event"); + return; + } + + ath12k_dbg(ab, ATH12K_DBG_WMI, + "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id); + + ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev.pdev_id)); + if (!ar) { + ath12k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id); + 
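For now the temperature handler only validates the pdev and logs the reading; ath11k forwards the same event into its thermal support. If ath12k grows the equivalent, the tail of this handler could end with a single call like the one below (ath12k_thermal_event_temperature() is an assumed helper, not part of this patch):

	/* sketch: hand the reading to a thermal interface once one exists */
	ath12k_thermal_event_temperature(ar, le32_to_cpu(ev.temp));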
return; + } +} + +static void ath12k_fils_discovery_event(struct ath12k_base *ab, + struct sk_buff *skb) +{ + const void **tb; + const struct wmi_fils_discovery_event *ev; + int ret; + + tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath12k_warn(ab, + "failed to parse FILS discovery event tlv %d\n", + ret); + return; + } + + ev = tb[WMI_TAG_HOST_SWFDA_EVENT]; + if (!ev) { + ath12k_warn(ab, "failed to fetch FILS discovery event\n"); + kfree(tb); + return; + } + + ath12k_warn(ab, + "FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n", + ev->vdev_id, ev->fils_tt, ev->tbtt); + + kfree(tb); +} + +static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab, + struct sk_buff *skb) +{ + const void **tb; + const struct wmi_probe_resp_tx_status_event *ev; + int ret; + + tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath12k_warn(ab, + "failed to parse probe response transmission status event tlv: %d\n", + ret); + return; + } + + ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT]; + if (!ev) { + ath12k_warn(ab, + "failed to fetch probe response transmission status event"); + kfree(tb); + return; + } + + if (ev->tx_status) + ath12k_warn(ab, + "Probe response transmission failed for vdev_id %u, status %u\n", + ev->vdev_id, ev->tx_status); + + kfree(tb); +} + +static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb) +{ + struct wmi_cmd_hdr *cmd_hdr; + enum wmi_tlv_event_id id; + + cmd_hdr = (struct wmi_cmd_hdr *)skb->data; + id = le32_get_bits(cmd_hdr->cmd_id, WMI_CMD_HDR_CMD_ID); + + if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr))) + goto out; + + switch (id) { + /* Process all the WMI events here */ + case WMI_SERVICE_READY_EVENTID: + ath12k_service_ready_event(ab, skb); + break; + case WMI_SERVICE_READY_EXT_EVENTID: + ath12k_service_ready_ext_event(ab, skb); + break; + case WMI_SERVICE_READY_EXT2_EVENTID: + ath12k_service_ready_ext2_event(ab, skb); + break; + case WMI_REG_CHAN_LIST_CC_EXT_EVENTID: + ath12k_reg_chan_list_event(ab, skb); + break; + case WMI_READY_EVENTID: + ath12k_ready_event(ab, skb); + break; + case WMI_PEER_DELETE_RESP_EVENTID: + ath12k_peer_delete_resp_event(ab, skb); + break; + case WMI_VDEV_START_RESP_EVENTID: + ath12k_vdev_start_resp_event(ab, skb); + break; + case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID: + ath12k_bcn_tx_status_event(ab, skb); + break; + case WMI_VDEV_STOPPED_EVENTID: + ath12k_vdev_stopped_event(ab, skb); + break; + case WMI_MGMT_RX_EVENTID: + ath12k_mgmt_rx_event(ab, skb); + /* mgmt_rx_event() owns the skb now! 
*/ + return; + case WMI_MGMT_TX_COMPLETION_EVENTID: + ath12k_mgmt_tx_compl_event(ab, skb); + break; + case WMI_SCAN_EVENTID: + ath12k_scan_event(ab, skb); + break; + case WMI_PEER_STA_KICKOUT_EVENTID: + ath12k_peer_sta_kickout_event(ab, skb); + break; + case WMI_ROAM_EVENTID: + ath12k_roam_event(ab, skb); + break; + case WMI_CHAN_INFO_EVENTID: + ath12k_chan_info_event(ab, skb); + break; + case WMI_PDEV_BSS_CHAN_INFO_EVENTID: + ath12k_pdev_bss_chan_info_event(ab, skb); + break; + case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID: + ath12k_vdev_install_key_compl_event(ab, skb); + break; + case WMI_SERVICE_AVAILABLE_EVENTID: + ath12k_service_available_event(ab, skb); + break; + case WMI_PEER_ASSOC_CONF_EVENTID: + ath12k_peer_assoc_conf_event(ab, skb); + break; + case WMI_UPDATE_STATS_EVENTID: + ath12k_update_stats_event(ab, skb); + break; + case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID: + ath12k_pdev_ctl_failsafe_check_event(ab, skb); + break; + case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID: + ath12k_wmi_pdev_csa_switch_count_status_event(ab, skb); + break; + case WMI_PDEV_TEMPERATURE_EVENTID: + ath12k_wmi_pdev_temperature_event(ab, skb); + break; + case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID: + ath12k_wmi_pdev_dma_ring_buf_release_event(ab, skb); + break; + case WMI_HOST_FILS_DISCOVERY_EVENTID: + ath12k_fils_discovery_event(ab, skb); + break; + case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID: + ath12k_probe_resp_tx_status_event(ab, skb); + break; + /* add Unsupported events here */ + case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID: + case WMI_PEER_OPER_MODE_CHANGE_EVENTID: + case WMI_TWT_ENABLE_EVENTID: + case WMI_TWT_DISABLE_EVENTID: + case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID: + ath12k_dbg(ab, ATH12K_DBG_WMI, + "ignoring unsupported event 0x%x\n", id); + break; + case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID: + ath12k_wmi_pdev_dfs_radar_detected_event(ab, skb); + break; + case WMI_VDEV_DELETE_RESP_EVENTID: + ath12k_vdev_delete_resp_event(ab, skb); + break; + /* TODO: Add remaining events */ + default: + ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id); + break; + } + +out: + dev_kfree_skb(skb); +} + +static int ath12k_connect_pdev_htc_service(struct ath12k_base *ab, + u32 pdev_idx) +{ + int status; + u32 svc_id[] = { ATH12K_HTC_SVC_ID_WMI_CONTROL, + ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1, + ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2 }; + struct ath12k_htc_svc_conn_req conn_req = {}; + struct ath12k_htc_svc_conn_resp conn_resp = {}; + + /* these fields are the same for all service endpoints */ + conn_req.ep_ops.ep_tx_complete = ath12k_wmi_htc_tx_complete; + conn_req.ep_ops.ep_rx_complete = ath12k_wmi_op_rx; + conn_req.ep_ops.ep_tx_credits = ath12k_wmi_op_ep_tx_credits; + + /* connect to control service */ + conn_req.service_id = svc_id[pdev_idx]; + + status = ath12k_htc_connect_service(&ab->htc, &conn_req, &conn_resp); + if (status) { + ath12k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n", + status); + return status; + } + + ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid; + ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid; + ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len; + + return 0; +} + +static int +ath12k_wmi_send_unit_test_cmd(struct ath12k *ar, + struct wmi_unit_test_cmd ut_cmd, + u32 *test_args) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_unit_test_cmd *cmd; + struct sk_buff *skb; + struct wmi_tlv *tlv; + void *ptr; + u32 *ut_cmd_args; + int buf_len, arg_len; + int ret; + int i; + + arg_len = sizeof(u32) * le32_to_cpu(ut_cmd.num_args); + buf_len = sizeof(ut_cmd) + 
arg_len + TLV_HDR_SIZE; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_unit_test_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_UNIT_TEST_CMD, + sizeof(ut_cmd)); + + cmd->vdev_id = ut_cmd.vdev_id; + cmd->module_id = ut_cmd.module_id; + cmd->num_args = ut_cmd.num_args; + cmd->diag_token = ut_cmd.diag_token; + + ptr = skb->data + sizeof(ut_cmd); + + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len); + + ptr += TLV_HDR_SIZE; + + ut_cmd_args = ptr; + for (i = 0; i < le32_to_cpu(ut_cmd.num_args); i++) + ut_cmd_args[i] = test_args[i]; + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "WMI unit test : module %d vdev %d n_args %d token %d\n", + cmd->module_id, cmd->vdev_id, cmd->num_args, + cmd->diag_token); + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID); + + if (ret) { + ath12k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD :%d\n", + ret); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_simulate_radar(struct ath12k *ar) +{ + struct ath12k_vif *arvif; + u32 dfs_args[DFS_MAX_TEST_ARGS]; + struct wmi_unit_test_cmd wmi_ut; + bool arvif_found = false; + + list_for_each_entry(arvif, &ar->arvifs, list) { + if (arvif->is_started && arvif->vdev_type == WMI_VDEV_TYPE_AP) { + arvif_found = true; + break; + } + } + + if (!arvif_found) + return -EINVAL; + + dfs_args[DFS_TEST_CMDID] = 0; + dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id; + /* Currently we could pass segment_id(b0 - b1), chirp(b2) + * freq offset (b3 - b10) to unit test. For simulation + * purpose this can be set to 0 which is valid. + */ + dfs_args[DFS_TEST_RADAR_PARAM] = 0; + + wmi_ut.vdev_id = cpu_to_le32(arvif->vdev_id); + wmi_ut.module_id = cpu_to_le32(DFS_UNIT_TEST_MODULE); + wmi_ut.num_args = cpu_to_le32(DFS_MAX_TEST_ARGS); + wmi_ut.diag_token = cpu_to_le32(DFS_UNIT_TEST_TOKEN); + + ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Triggering Radar Simulation\n"); + + return ath12k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args); +} + +int ath12k_wmi_connect(struct ath12k_base *ab) +{ + u32 i; + u8 wmi_ep_count; + + wmi_ep_count = ab->htc.wmi_ep_count; + if (wmi_ep_count > ab->hw_params->max_radios) + return -1; + + for (i = 0; i < wmi_ep_count; i++) + ath12k_connect_pdev_htc_service(ab, i); + + return 0; +} + +static void ath12k_wmi_pdev_detach(struct ath12k_base *ab, u8 pdev_id) +{ + if (WARN_ON(pdev_id >= MAX_RADIOS)) + return; + + /* TODO: Deinit any pdev specific wmi resource */ +} + +int ath12k_wmi_pdev_attach(struct ath12k_base *ab, + u8 pdev_id) +{ + struct ath12k_wmi_pdev *wmi_handle; + + if (pdev_id >= ab->hw_params->max_radios) + return -EINVAL; + + wmi_handle = &ab->wmi_ab.wmi[pdev_id]; + + wmi_handle->wmi_ab = &ab->wmi_ab; + + ab->wmi_ab.ab = ab; + /* TODO: Init remaining resource specific to pdev */ + + return 0; +} + +int ath12k_wmi_attach(struct ath12k_base *ab) +{ + int ret; + + ret = ath12k_wmi_pdev_attach(ab, 0); + if (ret) + return ret; + + ab->wmi_ab.ab = ab; + ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX; + + /* It's overwritten when service_ext_ready is handled */ + if (ab->hw_params->single_pdev_only) + ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE; + + /* TODO: Init remaining wmi soc resources required */ + init_completion(&ab->wmi_ab.service_ready); + init_completion(&ab->wmi_ab.unified_ready); + + return 0; +} + +void ath12k_wmi_detach(struct ath12k_base *ab) +{ + int i; + + /* TODO: Deinit wmi resource specific to SOC as required */ + + for (i = 0; i < ab->htc.wmi_ep_count; i++) + 
ath12k_wmi_pdev_detach(ab, i); + + ath12k_wmi_free_dbring_caps(ab); +} diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h new file mode 100644 index 000000000000..84e3fb918e43 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/wmi.h @@ -0,0 +1,4803 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef ATH12K_WMI_H +#define ATH12K_WMI_H + +#include <net/mac80211.h> +#include "htc.h" + +/* Naming conventions for structures: + * + * _cmd means that this is a firmware command sent from host to firmware. + * + * _event means that this is a firmware event sent from firmware to host + * + * _params is a structure which is embedded either into _cmd or _event (or + * both), it is not sent individually. + * + * _arg is used inside the host, the firmware does not see that at all. + */ + +struct ath12k_base; +struct ath12k; + +/* There is no signed version of __le32, so for a temporary solution come + * up with our own version. The idea is from fs/ntfs/endian.h. + * + * Use a_ prefix so that it doesn't conflict if we get proper support to + * linux/types.h. + */ +typedef __s32 __bitwise a_sle32; + +static inline a_sle32 a_cpu_to_sle32(s32 val) +{ + return (__force a_sle32)cpu_to_le32(val); +} + +static inline s32 a_sle32_to_cpu(a_sle32 val) +{ + return le32_to_cpu((__force __le32)val); +} + +/* defines to set Packet extension values which can be 0 us, 8 usec or 16 usec */ +#define MAX_HE_NSS 8 +#define MAX_HE_MODULATION 8 +#define MAX_HE_RU 4 +#define HE_MODULATION_NONE 7 +#define HE_PET_0_USEC 0 +#define HE_PET_8_USEC 1 +#define HE_PET_16_USEC 2 + +#define WMI_MAX_CHAINS 8 + +#define WMI_MAX_NUM_SS MAX_HE_NSS +#define WMI_MAX_NUM_RU MAX_HE_RU + +#define WMI_TLV_CMD(grp_id) (((grp_id) << 12) | 0x1) +#define WMI_TLV_EV(grp_id) (((grp_id) << 12) | 0x1) +#define WMI_TLV_CMD_UNSUPPORTED 0 +#define WMI_TLV_PDEV_PARAM_UNSUPPORTED 0 +#define WMI_TLV_VDEV_PARAM_UNSUPPORTED 0 + +struct wmi_cmd_hdr { + __le32 cmd_id; +} __packed; + +struct wmi_tlv { + __le32 header; + u8 value[]; +} __packed; + +#define WMI_TLV_LEN GENMASK(15, 0) +#define WMI_TLV_TAG GENMASK(31, 16) +#define TLV_HDR_SIZE sizeof_field(struct wmi_tlv, header) + +#define WMI_CMD_HDR_CMD_ID GENMASK(23, 0) +#define WMI_MAX_MEM_REQS 32 +#define ATH12K_MAX_HW_LISTEN_INTERVAL 5 + +#define WMI_HOST_RC_DS_FLAG 0x01 +#define WMI_HOST_RC_CW40_FLAG 0x02 +#define WMI_HOST_RC_SGI_FLAG 0x04 +#define WMI_HOST_RC_HT_FLAG 0x08 +#define WMI_HOST_RC_RTSCTS_FLAG 0x10 +#define WMI_HOST_RC_TX_STBC_FLAG 0x20 +#define WMI_HOST_RC_RX_STBC_FLAG 0xC0 +#define WMI_HOST_RC_RX_STBC_FLAG_S 6 +#define WMI_HOST_RC_WEP_TKIP_FLAG 0x100 +#define WMI_HOST_RC_TS_FLAG 0x200 +#define WMI_HOST_RC_UAPSD_FLAG 0x400 + +#define WMI_HT_CAP_ENABLED 0x0001 +#define WMI_HT_CAP_HT20_SGI 0x0002 +#define WMI_HT_CAP_DYNAMIC_SMPS 0x0004 +#define WMI_HT_CAP_TX_STBC 0x0008 +#define WMI_HT_CAP_TX_STBC_MASK_SHIFT 3 +#define WMI_HT_CAP_RX_STBC 0x0030 +#define WMI_HT_CAP_RX_STBC_MASK_SHIFT 4 +#define WMI_HT_CAP_LDPC 0x0040 +#define WMI_HT_CAP_L_SIG_TXOP_PROT 0x0080 +#define WMI_HT_CAP_MPDU_DENSITY 0x0700 +#define WMI_HT_CAP_MPDU_DENSITY_MASK_SHIFT 8 +#define WMI_HT_CAP_HT40_SGI 0x0800 +#define WMI_HT_CAP_RX_LDPC 0x1000 +#define WMI_HT_CAP_TX_LDPC 0x2000 +#define WMI_HT_CAP_IBF_BFER 0x4000 + +/* These macros should be used when we wish to advertise STBC support for + * only 1SS or 2SS or 3SS. 
+ */ +#define WMI_HT_CAP_RX_STBC_1SS 0x0010 +#define WMI_HT_CAP_RX_STBC_2SS 0x0020 +#define WMI_HT_CAP_RX_STBC_3SS 0x0030 + +#define WMI_HT_CAP_DEFAULT_ALL (WMI_HT_CAP_ENABLED | \ + WMI_HT_CAP_HT20_SGI | \ + WMI_HT_CAP_HT40_SGI | \ + WMI_HT_CAP_TX_STBC | \ + WMI_HT_CAP_RX_STBC | \ + WMI_HT_CAP_LDPC) + +#define WMI_VHT_CAP_MAX_MPDU_LEN_MASK 0x00000003 +#define WMI_VHT_CAP_RX_LDPC 0x00000010 +#define WMI_VHT_CAP_SGI_80MHZ 0x00000020 +#define WMI_VHT_CAP_SGI_160MHZ 0x00000040 +#define WMI_VHT_CAP_TX_STBC 0x00000080 +#define WMI_VHT_CAP_RX_STBC_MASK 0x00000300 +#define WMI_VHT_CAP_RX_STBC_MASK_SHIFT 8 +#define WMI_VHT_CAP_SU_BFER 0x00000800 +#define WMI_VHT_CAP_SU_BFEE 0x00001000 +#define WMI_VHT_CAP_MAX_CS_ANT_MASK 0x0000E000 +#define WMI_VHT_CAP_MAX_CS_ANT_MASK_SHIFT 13 +#define WMI_VHT_CAP_MAX_SND_DIM_MASK 0x00070000 +#define WMI_VHT_CAP_MAX_SND_DIM_MASK_SHIFT 16 +#define WMI_VHT_CAP_MU_BFER 0x00080000 +#define WMI_VHT_CAP_MU_BFEE 0x00100000 +#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP 0x03800000 +#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIFT 23 +#define WMI_VHT_CAP_RX_FIXED_ANT 0x10000000 +#define WMI_VHT_CAP_TX_FIXED_ANT 0x20000000 + +#define WMI_VHT_CAP_MAX_MPDU_LEN_11454 0x00000002 + +/* These macros should be used when we wish to advertise STBC support for + * only 1SS or 2SS or 3SS. + */ +#define WMI_VHT_CAP_RX_STBC_1SS 0x00000100 +#define WMI_VHT_CAP_RX_STBC_2SS 0x00000200 +#define WMI_VHT_CAP_RX_STBC_3SS 0x00000300 + +#define WMI_VHT_CAP_DEFAULT_ALL (WMI_VHT_CAP_MAX_MPDU_LEN_11454 | \ + WMI_VHT_CAP_SGI_80MHZ | \ + WMI_VHT_CAP_TX_STBC | \ + WMI_VHT_CAP_RX_STBC_MASK | \ + WMI_VHT_CAP_RX_LDPC | \ + WMI_VHT_CAP_MAX_AMPDU_LEN_EXP | \ + WMI_VHT_CAP_RX_FIXED_ANT | \ + WMI_VHT_CAP_TX_FIXED_ANT) + +#define WLAN_SCAN_MAX_HINT_S_SSID 10 +#define WLAN_SCAN_MAX_HINT_BSSID 10 +#define MAX_RNR_BSS 5 + +#define WLAN_SCAN_PARAMS_MAX_SSID 16 +#define WLAN_SCAN_PARAMS_MAX_BSSID 4 +#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256 + +#define WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG 1 + +#define WMI_BA_MODE_BUFFER_SIZE_256 3 + +/* HW mode config type replicated from FW header + * @WMI_HOST_HW_MODE_SINGLE: Only one PHY is active. + * @WMI_HOST_HW_MODE_DBS: Both PHYs are active in different bands, + * one in 2G and another in 5G. + * @WMI_HOST_HW_MODE_SBS_PASSIVE: Both PHYs are in passive mode (only rx) in + * same band; no tx allowed. + * @WMI_HOST_HW_MODE_SBS: Both PHYs are active in the same band. + * Support for both PHYs within one band is planned + * for 5G only (as indicated in WMI_MAC_PHY_CAPABILITIES), + * but could be extended to other bands in the future. + * The separation of the band between the two PHYs needs + * to be communicated separately. + * @WMI_HOST_HW_MODE_DBS_SBS: 3 PHYs, with 2 on the same band doing SBS + * as in WMI_HW_MODE_SBS, and 3rd on the other band + * @WMI_HOST_HW_MODE_DBS_OR_SBS: Two PHYs, with one PHY capable of both 2G and + * 5G. It can support SBS (5G + 5G) OR DBS (5G + 2G). + * @WMI_HOST_HW_MODE_MAX: Max hw_mode_id. Used to indicate invalid mode. + */ +enum wmi_host_hw_mode_config_type { + WMI_HOST_HW_MODE_SINGLE = 0, + WMI_HOST_HW_MODE_DBS = 1, + WMI_HOST_HW_MODE_SBS_PASSIVE = 2, + WMI_HOST_HW_MODE_SBS = 3, + WMI_HOST_HW_MODE_DBS_SBS = 4, + WMI_HOST_HW_MODE_DBS_OR_SBS = 5, + + /* keep last */ + WMI_HOST_HW_MODE_MAX +}; + +/* HW mode priority values used to detect the preferred HW mode + * on the available modes. 
+ */ +enum wmi_host_hw_mode_priority { + WMI_HOST_HW_MODE_DBS_SBS_PRI, + WMI_HOST_HW_MODE_DBS_PRI, + WMI_HOST_HW_MODE_DBS_OR_SBS_PRI, + WMI_HOST_HW_MODE_SBS_PRI, + WMI_HOST_HW_MODE_SBS_PASSIVE_PRI, + WMI_HOST_HW_MODE_SINGLE_PRI, + + /* keep last the lowest priority */ + WMI_HOST_HW_MODE_MAX_PRI +}; + +enum WMI_HOST_WLAN_BAND { + WMI_HOST_WLAN_2G_CAP = 1, + WMI_HOST_WLAN_5G_CAP = 2, + WMI_HOST_WLAN_2G_5G_CAP = 3, +}; + +enum wmi_cmd_group { + /* 0 to 2 are reserved */ + WMI_GRP_START = 0x3, + WMI_GRP_SCAN = WMI_GRP_START, + WMI_GRP_PDEV = 0x4, + WMI_GRP_VDEV = 0x5, + WMI_GRP_PEER = 0x6, + WMI_GRP_MGMT = 0x7, + WMI_GRP_BA_NEG = 0x8, + WMI_GRP_STA_PS = 0x9, + WMI_GRP_DFS = 0xa, + WMI_GRP_ROAM = 0xb, + WMI_GRP_OFL_SCAN = 0xc, + WMI_GRP_P2P = 0xd, + WMI_GRP_AP_PS = 0xe, + WMI_GRP_RATE_CTRL = 0xf, + WMI_GRP_PROFILE = 0x10, + WMI_GRP_SUSPEND = 0x11, + WMI_GRP_BCN_FILTER = 0x12, + WMI_GRP_WOW = 0x13, + WMI_GRP_RTT = 0x14, + WMI_GRP_SPECTRAL = 0x15, + WMI_GRP_STATS = 0x16, + WMI_GRP_ARP_NS_OFL = 0x17, + WMI_GRP_NLO_OFL = 0x18, + WMI_GRP_GTK_OFL = 0x19, + WMI_GRP_CSA_OFL = 0x1a, + WMI_GRP_CHATTER = 0x1b, + WMI_GRP_TID_ADDBA = 0x1c, + WMI_GRP_MISC = 0x1d, + WMI_GRP_GPIO = 0x1e, + WMI_GRP_FWTEST = 0x1f, + WMI_GRP_TDLS = 0x20, + WMI_GRP_RESMGR = 0x21, + WMI_GRP_STA_SMPS = 0x22, + WMI_GRP_WLAN_HB = 0x23, + WMI_GRP_RMC = 0x24, + WMI_GRP_MHF_OFL = 0x25, + WMI_GRP_LOCATION_SCAN = 0x26, + WMI_GRP_OEM = 0x27, + WMI_GRP_NAN = 0x28, + WMI_GRP_COEX = 0x29, + WMI_GRP_OBSS_OFL = 0x2a, + WMI_GRP_LPI = 0x2b, + WMI_GRP_EXTSCAN = 0x2c, + WMI_GRP_DHCP_OFL = 0x2d, + WMI_GRP_IPA = 0x2e, + WMI_GRP_MDNS_OFL = 0x2f, + WMI_GRP_SAP_OFL = 0x30, + WMI_GRP_OCB = 0x31, + WMI_GRP_SOC = 0x32, + WMI_GRP_PKT_FILTER = 0x33, + WMI_GRP_MAWC = 0x34, + WMI_GRP_PMF_OFFLOAD = 0x35, + WMI_GRP_BPF_OFFLOAD = 0x36, + WMI_GRP_NAN_DATA = 0x37, + WMI_GRP_PROTOTYPE = 0x38, + WMI_GRP_MONITOR = 0x39, + WMI_GRP_REGULATORY = 0x3a, + WMI_GRP_HW_DATA_FILTER = 0x3b, + WMI_GRP_WLM = 0x3c, + WMI_GRP_11K_OFFLOAD = 0x3d, + WMI_GRP_TWT = 0x3e, + WMI_GRP_MOTION_DET = 0x3f, + WMI_GRP_SPATIAL_REUSE = 0x40, +}; + +#define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1) +#define WMI_EVT_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1) + +enum wmi_tlv_cmd_id { + WMI_CMD_UNSUPPORTED = 0, + WMI_INIT_CMDID = 0x1, + WMI_START_SCAN_CMDID = WMI_TLV_CMD(WMI_GRP_SCAN), + WMI_STOP_SCAN_CMDID, + WMI_SCAN_CHAN_LIST_CMDID, + WMI_SCAN_SCH_PRIO_TBL_CMDID, + WMI_SCAN_UPDATE_REQUEST_CMDID, + WMI_SCAN_PROB_REQ_OUI_CMDID, + WMI_SCAN_ADAPTIVE_DWELL_CONFIG_CMDID, + WMI_PDEV_SET_REGDOMAIN_CMDID = WMI_TLV_CMD(WMI_GRP_PDEV), + WMI_PDEV_SET_CHANNEL_CMDID, + WMI_PDEV_SET_PARAM_CMDID, + WMI_PDEV_PKTLOG_ENABLE_CMDID, + WMI_PDEV_PKTLOG_DISABLE_CMDID, + WMI_PDEV_SET_WMM_PARAMS_CMDID, + WMI_PDEV_SET_HT_CAP_IE_CMDID, + WMI_PDEV_SET_VHT_CAP_IE_CMDID, + WMI_PDEV_SET_DSCP_TID_MAP_CMDID, + WMI_PDEV_SET_QUIET_MODE_CMDID, + WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID, + WMI_PDEV_GET_TPC_CONFIG_CMDID, + WMI_PDEV_SET_BASE_MACADDR_CMDID, + WMI_PDEV_DUMP_CMDID, + WMI_PDEV_SET_LED_CONFIG_CMDID, + WMI_PDEV_GET_TEMPERATURE_CMDID, + WMI_PDEV_SET_LED_FLASHING_CMDID, + WMI_PDEV_SMART_ANT_ENABLE_CMDID, + WMI_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID, + WMI_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID, + WMI_PDEV_SET_CTL_TABLE_CMDID, + WMI_PDEV_SET_MIMOGAIN_TABLE_CMDID, + WMI_PDEV_FIPS_CMDID, + WMI_PDEV_GET_ANI_CCK_CONFIG_CMDID, + WMI_PDEV_GET_ANI_OFDM_CONFIG_CMDID, + WMI_PDEV_GET_NFCAL_POWER_CMDID, + WMI_PDEV_GET_TPC_CMDID, + WMI_MIB_STATS_ENABLE_CMDID, + WMI_PDEV_SET_PCL_CMDID, + WMI_PDEV_SET_HW_MODE_CMDID, + WMI_PDEV_SET_MAC_CONFIG_CMDID, + 
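The command groups above feed WMI_TLV_CMD(), which expands to ((grp_id) << 12) | 0x1, so every group gets a fixed base ID and its commands auto-increment from there. A worked example using the scan group, computed from the definitions above:

	/* WMI_START_SCAN_CMDID = WMI_TLV_CMD(WMI_GRP_SCAN)
	 *                      = (0x3 << 12) | 0x1 = 0x3001,
	 * then WMI_STOP_SCAN_CMDID = 0x3002,
	 * WMI_SCAN_CHAN_LIST_CMDID = 0x3003, and so on; a group can grow
	 * without renumbering any other group.
	 */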
+
+enum wmi_tlv_cmd_id {
+ WMI_CMD_UNSUPPORTED = 0,
+ WMI_INIT_CMDID = 0x1,
+ WMI_START_SCAN_CMDID = WMI_TLV_CMD(WMI_GRP_SCAN),
+ WMI_STOP_SCAN_CMDID,
+ WMI_SCAN_CHAN_LIST_CMDID,
+ WMI_SCAN_SCH_PRIO_TBL_CMDID,
+ WMI_SCAN_UPDATE_REQUEST_CMDID,
+ WMI_SCAN_PROB_REQ_OUI_CMDID,
+ WMI_SCAN_ADAPTIVE_DWELL_CONFIG_CMDID,
+ WMI_PDEV_SET_REGDOMAIN_CMDID = WMI_TLV_CMD(WMI_GRP_PDEV),
+ WMI_PDEV_SET_CHANNEL_CMDID,
+ WMI_PDEV_SET_PARAM_CMDID,
+ WMI_PDEV_PKTLOG_ENABLE_CMDID,
+ WMI_PDEV_PKTLOG_DISABLE_CMDID,
+ WMI_PDEV_SET_WMM_PARAMS_CMDID,
+ WMI_PDEV_SET_HT_CAP_IE_CMDID,
+ WMI_PDEV_SET_VHT_CAP_IE_CMDID,
+ WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_PDEV_SET_QUIET_MODE_CMDID,
+ WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ WMI_PDEV_GET_TPC_CONFIG_CMDID,
+ WMI_PDEV_SET_BASE_MACADDR_CMDID,
+ WMI_PDEV_DUMP_CMDID,
+ WMI_PDEV_SET_LED_CONFIG_CMDID,
+ WMI_PDEV_GET_TEMPERATURE_CMDID,
+ WMI_PDEV_SET_LED_FLASHING_CMDID,
+ WMI_PDEV_SMART_ANT_ENABLE_CMDID,
+ WMI_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
+ WMI_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
+ WMI_PDEV_SET_CTL_TABLE_CMDID,
+ WMI_PDEV_SET_MIMOGAIN_TABLE_CMDID,
+ WMI_PDEV_FIPS_CMDID,
+ WMI_PDEV_GET_ANI_CCK_CONFIG_CMDID,
+ WMI_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
+ WMI_PDEV_GET_NFCAL_POWER_CMDID,
+ WMI_PDEV_GET_TPC_CMDID,
+ WMI_MIB_STATS_ENABLE_CMDID,
+ WMI_PDEV_SET_PCL_CMDID,
+ WMI_PDEV_SET_HW_MODE_CMDID,
+ WMI_PDEV_SET_MAC_CONFIG_CMDID,
+ WMI_PDEV_SET_ANTENNA_MODE_CMDID,
+ WMI_SET_PERIODIC_CHANNEL_STATS_CONFIG_CMDID,
+ WMI_PDEV_WAL_POWER_DEBUG_CMDID,
+ WMI_PDEV_SET_REORDER_TIMEOUT_VAL_CMDID,
+ WMI_PDEV_SET_WAKEUP_CONFIG_CMDID,
+ WMI_PDEV_GET_ANTDIV_STATUS_CMDID,
+ WMI_PDEV_GET_CHIP_POWER_STATS_CMDID,
+ WMI_PDEV_SET_STATS_THRESHOLD_CMDID,
+ WMI_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMDID,
+ WMI_PDEV_UPDATE_PKT_ROUTING_CMDID,
+ WMI_PDEV_CHECK_CAL_VERSION_CMDID,
+ WMI_PDEV_SET_DIVERSITY_GAIN_CMDID,
+ WMI_PDEV_DIV_GET_RSSI_ANTID_CMDID,
+ WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+ WMI_PDEV_UPDATE_PMK_CACHE_CMDID,
+ WMI_PDEV_UPDATE_FILS_HLP_PKT_CMDID,
+ WMI_PDEV_UPDATE_CTLTABLE_REQUEST_CMDID,
+ WMI_PDEV_CONFIG_VENDOR_OUI_ACTION_CMDID,
+ WMI_PDEV_SET_AC_TX_QUEUE_OPTIMIZED_CMDID,
+ WMI_PDEV_SET_RX_FILTER_PROMISCUOUS_CMDID,
+ WMI_PDEV_DMA_RING_CFG_REQ_CMDID,
+ WMI_PDEV_HE_TB_ACTION_FRM_CMDID,
+ WMI_PDEV_PKTLOG_FILTER_CMDID,
+ WMI_VDEV_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_VDEV),
+ WMI_VDEV_DELETE_CMDID,
+ WMI_VDEV_START_REQUEST_CMDID,
+ WMI_VDEV_RESTART_REQUEST_CMDID,
+ WMI_VDEV_UP_CMDID,
+ WMI_VDEV_STOP_CMDID,
+ WMI_VDEV_DOWN_CMDID,
+ WMI_VDEV_SET_PARAM_CMDID,
+ WMI_VDEV_INSTALL_KEY_CMDID,
+ WMI_VDEV_WNM_SLEEPMODE_CMDID,
+ WMI_VDEV_WMM_ADDTS_CMDID,
+ WMI_VDEV_WMM_DELTS_CMDID,
+ WMI_VDEV_SET_WMM_PARAMS_CMDID,
+ WMI_VDEV_SET_GTX_PARAMS_CMDID,
+ WMI_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMDID,
+ WMI_VDEV_PLMREQ_START_CMDID,
+ WMI_VDEV_PLMREQ_STOP_CMDID,
+ WMI_VDEV_TSF_TSTAMP_ACTION_CMDID,
+ WMI_VDEV_SET_IE_CMDID,
+ WMI_VDEV_RATEMASK_CMDID,
+ WMI_VDEV_ATF_REQUEST_CMDID,
+ WMI_VDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
+ WMI_VDEV_SET_QUIET_MODE_CMDID,
+ WMI_VDEV_SET_CUSTOM_AGGR_SIZE_CMDID,
+ WMI_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMDID,
+ WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMDID,
+ WMI_PEER_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_PEER),
+ WMI_PEER_DELETE_CMDID,
+ WMI_PEER_FLUSH_TIDS_CMDID,
+ WMI_PEER_SET_PARAM_CMDID,
+ WMI_PEER_ASSOC_CMDID,
+ WMI_PEER_ADD_WDS_ENTRY_CMDID,
+ WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
+ WMI_PEER_MCAST_GROUP_CMDID,
+ WMI_PEER_INFO_REQ_CMDID,
+ WMI_PEER_GET_ESTIMATED_LINKSPEED_CMDID,
+ WMI_PEER_SET_RATE_REPORT_CONDITION_CMDID,
+ WMI_PEER_UPDATE_WDS_ENTRY_CMDID,
+ WMI_PEER_ADD_PROXY_STA_ENTRY_CMDID,
+ WMI_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
+ WMI_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
+ WMI_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
+ WMI_PEER_ATF_REQUEST_CMDID,
+ WMI_PEER_BWF_REQUEST_CMDID,
+ WMI_PEER_REORDER_QUEUE_SETUP_CMDID,
+ WMI_PEER_REORDER_QUEUE_REMOVE_CMDID,
+ WMI_PEER_SET_RX_BLOCKSIZE_CMDID,
+ WMI_PEER_ANTDIV_INFO_REQ_CMDID,
+ WMI_BCN_TX_CMDID = WMI_TLV_CMD(WMI_GRP_MGMT),
+ WMI_PDEV_SEND_BCN_CMDID,
+ WMI_BCN_TMPL_CMDID,
+ WMI_BCN_FILTER_RX_CMDID,
+ WMI_PRB_REQ_FILTER_RX_CMDID,
+ WMI_MGMT_TX_CMDID,
+ WMI_PRB_TMPL_CMDID,
+ WMI_MGMT_TX_SEND_CMDID,
+ WMI_OFFCHAN_DATA_TX_SEND_CMDID,
+ WMI_PDEV_SEND_FD_CMDID,
+ WMI_BCN_OFFLOAD_CTRL_CMDID,
+ WMI_BSS_COLOR_CHANGE_ENABLE_CMDID,
+ WMI_VDEV_BCN_OFFLOAD_QUIET_CONFIG_CMDID,
+ WMI_FILS_DISCOVERY_TMPL_CMDID,
+ WMI_ADDBA_CLEAR_RESP_CMDID = WMI_TLV_CMD(WMI_GRP_BA_NEG),
+ WMI_ADDBA_SEND_CMDID,
+ WMI_ADDBA_STATUS_CMDID,
+ WMI_DELBA_SEND_CMDID,
+ WMI_ADDBA_SET_RESP_CMDID,
+ WMI_SEND_SINGLEAMSDU_CMDID,
+ WMI_STA_POWERSAVE_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_STA_PS),
+ WMI_STA_POWERSAVE_PARAM_CMDID,
+ WMI_STA_MIMO_PS_MODE_CMDID,
+ WMI_PDEV_DFS_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_DFS),
+ WMI_PDEV_DFS_DISABLE_CMDID,
+ WMI_DFS_PHYERR_FILTER_ENA_CMDID,
+ WMI_DFS_PHYERR_FILTER_DIS_CMDID,
+ WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID,
+ WMI_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMDID,
+ WMI_VDEV_ADFS_CH_CFG_CMDID,
+ WMI_VDEV_ADFS_OCAC_ABORT_CMDID,
+ WMI_ROAM_SCAN_MODE = WMI_TLV_CMD(WMI_GRP_ROAM),
+ WMI_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_ROAM_SCAN_PERIOD,
+ WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_ROAM_AP_PROFILE,
+ WMI_ROAM_CHAN_LIST,
+ WMI_ROAM_SCAN_CMD,
+ WMI_ROAM_SYNCH_COMPLETE,
+ WMI_ROAM_SET_RIC_REQUEST_CMDID,
+ WMI_ROAM_INVOKE_CMDID,
+ WMI_ROAM_FILTER_CMDID,
+ WMI_ROAM_SUBNET_CHANGE_CONFIG_CMDID,
+ WMI_ROAM_CONFIGURE_MAWC_CMDID,
+ WMI_ROAM_SET_MBO_PARAM_CMDID,
+ WMI_ROAM_PER_CONFIG_CMDID,
+ WMI_ROAM_BTM_CONFIG_CMDID,
+ WMI_ENABLE_FILS_CMDID,
+ WMI_OFL_SCAN_ADD_AP_PROFILE = WMI_TLV_CMD(WMI_GRP_OFL_SCAN),
+ WMI_OFL_SCAN_REMOVE_AP_PROFILE,
+ WMI_OFL_SCAN_PERIOD,
+ WMI_P2P_DEV_SET_DEVICE_INFO = WMI_TLV_CMD(WMI_GRP_P2P),
+ WMI_P2P_DEV_SET_DISCOVERABILITY,
+ WMI_P2P_GO_SET_BEACON_IE,
+ WMI_P2P_GO_SET_PROBE_RESP_IE,
+ WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
+ WMI_P2P_DISC_OFFLOAD_CONFIG_CMDID,
+ WMI_P2P_DISC_OFFLOAD_APPIE_CMDID,
+ WMI_P2P_DISC_OFFLOAD_PATTERN_CMDID,
+ WMI_P2P_SET_OPPPS_PARAM_CMDID,
+ WMI_P2P_LISTEN_OFFLOAD_START_CMDID,
+ WMI_P2P_LISTEN_OFFLOAD_STOP_CMDID,
+ WMI_AP_PS_PEER_PARAM_CMDID = WMI_TLV_CMD(WMI_GRP_AP_PS),
+ WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
+ WMI_AP_PS_EGAP_PARAM_CMDID,
+ WMI_PEER_RATE_RETRY_SCHED_CMDID = WMI_TLV_CMD(WMI_GRP_RATE_CTRL),
+ WMI_WLAN_PROFILE_TRIGGER_CMDID = WMI_TLV_CMD(WMI_GRP_PROFILE),
+ WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ WMI_PDEV_SUSPEND_CMDID = WMI_TLV_CMD(WMI_GRP_SUSPEND),
+ WMI_PDEV_RESUME_CMDID,
+ WMI_ADD_BCN_FILTER_CMDID = WMI_TLV_CMD(WMI_GRP_BCN_FILTER),
+ WMI_RMV_BCN_FILTER_CMDID,
+ WMI_WOW_ADD_WAKE_PATTERN_CMDID = WMI_TLV_CMD(WMI_GRP_WOW),
+ WMI_WOW_DEL_WAKE_PATTERN_CMDID,
+ WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ WMI_WOW_ENABLE_CMDID,
+ WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ WMI_WOW_IOAC_ADD_KEEPALIVE_CMDID,
+ WMI_WOW_IOAC_DEL_KEEPALIVE_CMDID,
+ WMI_WOW_IOAC_ADD_WAKE_PATTERN_CMDID,
+ WMI_WOW_IOAC_DEL_WAKE_PATTERN_CMDID,
+ WMI_D0_WOW_ENABLE_DISABLE_CMDID,
+ WMI_EXTWOW_ENABLE_CMDID,
+ WMI_EXTWOW_SET_APP_TYPE1_PARAMS_CMDID,
+ WMI_EXTWOW_SET_APP_TYPE2_PARAMS_CMDID,
+ WMI_WOW_ENABLE_ICMPV6_NA_FLT_CMDID,
+ WMI_WOW_UDP_SVC_OFLD_CMDID,
+ WMI_WOW_HOSTWAKEUP_GPIO_PIN_PATTERN_CONFIG_CMDID,
+ WMI_WOW_SET_ACTION_WAKE_UP_CMDID,
+ WMI_RTT_MEASREQ_CMDID = WMI_TLV_CMD(WMI_GRP_RTT),
+ WMI_RTT_TSF_CMDID,
+ WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID = WMI_TLV_CMD(WMI_GRP_SPECTRAL),
+ WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ WMI_REQUEST_STATS_CMDID = WMI_TLV_CMD(WMI_GRP_STATS),
+ WMI_MCC_SCHED_TRAFFIC_STATS_CMDID,
+ WMI_REQUEST_STATS_EXT_CMDID,
+ WMI_REQUEST_LINK_STATS_CMDID,
+ WMI_START_LINK_STATS_CMDID,
+ WMI_CLEAR_LINK_STATS_CMDID,
+ WMI_GET_FW_MEM_DUMP_CMDID,
+ WMI_DEBUG_MESG_FLUSH_CMDID,
+ WMI_DIAG_EVENT_LOG_CONFIG_CMDID,
+ WMI_REQUEST_WLAN_STATS_CMDID,
+ WMI_REQUEST_RCPI_CMDID,
+ WMI_REQUEST_PEER_STATS_INFO_CMDID,
+ WMI_REQUEST_RADIO_CHAN_STATS_CMDID,
+ WMI_SET_ARP_NS_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_ARP_NS_OFL),
+ WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID,
+ WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID,
+ WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_NLO_OFL),
+ WMI_APFIND_CMDID,
+ WMI_PASSPOINT_LIST_CONFIG_CMDID,
+ WMI_NLO_CONFIGURE_MAWC_CMDID,
+ WMI_GTK_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_GTK_OFL),
+ WMI_CSA_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_CSA_OFL),
+ WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
+ WMI_CHATTER_SET_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_CHATTER),
+ WMI_CHATTER_ADD_COALESCING_FILTER_CMDID,
+ WMI_CHATTER_DELETE_COALESCING_FILTER_CMDID,
+ WMI_CHATTER_COALESCING_QUERY_CMDID,
+ WMI_PEER_TID_ADDBA_CMDID = WMI_TLV_CMD(WMI_GRP_TID_ADDBA),
+ WMI_PEER_TID_DELBA_CMDID,
+ WMI_STA_DTIM_PS_METHOD_CMDID,
+ WMI_STA_UAPSD_AUTO_TRIG_CMDID,
+ WMI_STA_KEEPALIVE_CMDID,
+ WMI_BA_REQ_SSN_CMDID,
+ WMI_ECHO_CMDID = WMI_TLV_CMD(WMI_GRP_MISC),
+ WMI_PDEV_UTF_CMDID,
+ WMI_DBGLOG_CFG_CMDID,
+ WMI_PDEV_QVIT_CMDID,
+ WMI_PDEV_FTM_INTG_CMDID,
+ WMI_VDEV_SET_KEEPALIVE_CMDID,
+ WMI_VDEV_GET_KEEPALIVE_CMDID,
+ WMI_FORCE_FW_HANG_CMDID,
+ WMI_SET_MCASTBCAST_FILTER_CMDID,
+ WMI_THERMAL_MGMT_CMDID,
+ WMI_HOST_AUTO_SHUTDOWN_CFG_CMDID,
+ WMI_TPC_CHAINMASK_CONFIG_CMDID,
+ WMI_SET_ANTENNA_DIVERSITY_CMDID,
+ WMI_OCB_SET_SCHED_CMDID,
+ WMI_RSSI_BREACH_MONITOR_CONFIG_CMDID,
+ WMI_LRO_CONFIG_CMDID,
+ WMI_TRANSFER_DATA_TO_FLASH_CMDID,
+ WMI_CONFIG_ENHANCED_MCAST_FILTER_CMDID,
+ WMI_VDEV_WISA_CMDID,
+ WMI_DBGLOG_TIME_STAMP_SYNC_CMDID,
+ WMI_SET_MULTIPLE_MCAST_FILTER_CMDID,
+ WMI_READ_DATA_FROM_FLASH_CMDID,
+ WMI_THERM_THROT_SET_CONF_CMDID,
+ WMI_RUNTIME_DPD_RECAL_CMDID,
+ WMI_GET_TPC_POWER_CMDID,
+ WMI_IDLE_TRIGGER_MONITOR_CMDID,
+ WMI_GPIO_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_GPIO),
+ WMI_GPIO_OUTPUT_CMDID,
+ WMI_TXBF_CMDID,
+ WMI_FWTEST_VDEV_MCC_SET_TBTT_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_FWTEST),
+ WMI_FWTEST_P2P_SET_NOA_PARAM_CMDID,
+ WMI_UNIT_TEST_CMDID,
+ WMI_FWTEST_CMDID,
+ WMI_QBOOST_CFG_CMDID,
+ WMI_TDLS_SET_STATE_CMDID = WMI_TLV_CMD(WMI_GRP_TDLS),
+ WMI_TDLS_PEER_UPDATE_CMDID,
+ WMI_TDLS_SET_OFFCHAN_MODE_CMDID,
+ WMI_RESMGR_ADAPTIVE_OCS_EN_DIS_CMDID = WMI_TLV_CMD(WMI_GRP_RESMGR),
+ WMI_RESMGR_SET_CHAN_TIME_QUOTA_CMDID,
+ WMI_RESMGR_SET_CHAN_LATENCY_CMDID,
+ WMI_STA_SMPS_FORCE_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_STA_SMPS),
+ WMI_STA_SMPS_PARAM_CMDID,
+ WMI_HB_SET_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_WLAN_HB),
+ WMI_HB_SET_TCP_PARAMS_CMDID,
+ WMI_HB_SET_TCP_PKT_FILTER_CMDID,
+ WMI_HB_SET_UDP_PARAMS_CMDID,
+ WMI_HB_SET_UDP_PKT_FILTER_CMDID,
+ WMI_RMC_SET_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_RMC),
+ WMI_RMC_SET_ACTION_PERIOD_CMDID,
+ WMI_RMC_CONFIG_CMDID,
+ WMI_RMC_SET_MANUAL_LEADER_CMDID,
+ WMI_MHF_OFFLOAD_SET_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_MHF_OFL),
+ WMI_MHF_OFFLOAD_PLUMB_ROUTING_TBL_CMDID,
+ WMI_BATCH_SCAN_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_LOCATION_SCAN),
+ WMI_BATCH_SCAN_DISABLE_CMDID,
+ WMI_BATCH_SCAN_TRIGGER_RESULT_CMDID,
+ WMI_OEM_REQ_CMDID = WMI_TLV_CMD(WMI_GRP_OEM),
+ WMI_OEM_REQUEST_CMDID,
+ WMI_LPI_OEM_REQ_CMDID,
+ WMI_NAN_CMDID = WMI_TLV_CMD(WMI_GRP_NAN),
+ WMI_MODEM_POWER_STATE_CMDID = WMI_TLV_CMD(WMI_GRP_COEX),
+ WMI_CHAN_AVOID_UPDATE_CMDID,
+ WMI_COEX_CONFIG_CMDID,
+ WMI_CHAN_AVOID_RPT_ALLOW_CMDID,
+ WMI_COEX_GET_ANTENNA_ISOLATION_CMDID,
+ WMI_SAR_LIMITS_CMDID,
+ WMI_OBSS_SCAN_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_OBSS_OFL),
+ WMI_OBSS_SCAN_DISABLE_CMDID,
+ WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID,
+ WMI_LPI_MGMT_SNOOPING_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_LPI),
+ WMI_LPI_START_SCAN_CMDID,
+ WMI_LPI_STOP_SCAN_CMDID,
+ WMI_EXTSCAN_START_CMDID = WMI_TLV_CMD(WMI_GRP_EXTSCAN),
+ WMI_EXTSCAN_STOP_CMDID,
+ WMI_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMDID,
+ WMI_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMDID,
+ WMI_EXTSCAN_GET_CACHED_RESULTS_CMDID,
+ WMI_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMDID,
+ WMI_EXTSCAN_SET_CAPABILITIES_CMDID,
+ WMI_EXTSCAN_GET_CAPABILITIES_CMDID,
+ WMI_EXTSCAN_CONFIGURE_HOTLIST_SSID_MONITOR_CMDID,
+ WMI_EXTSCAN_CONFIGURE_MAWC_CMDID,
+ WMI_SET_DHCP_SERVER_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_DHCP_OFL),
+ WMI_IPA_OFFLOAD_ENABLE_DISABLE_CMDID = WMI_TLV_CMD(WMI_GRP_IPA),
+ WMI_MDNS_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_MDNS_OFL),
+ WMI_MDNS_SET_FQDN_CMDID,
+ WMI_MDNS_SET_RESPONSE_CMDID,
+ WMI_MDNS_GET_STATS_CMDID,
+ WMI_SAP_OFL_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_SAP_OFL),
+ WMI_SAP_SET_BLACKLIST_PARAM_CMDID,
+ WMI_OCB_SET_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_OCB),
+ WMI_OCB_SET_UTC_TIME_CMDID,
+ WMI_OCB_START_TIMING_ADVERT_CMDID,
+ WMI_OCB_STOP_TIMING_ADVERT_CMDID,
+ WMI_OCB_GET_TSF_TIMER_CMDID,
+ WMI_DCC_GET_STATS_CMDID,
+ WMI_DCC_CLEAR_STATS_CMDID,
+ WMI_DCC_UPDATE_NDL_CMDID,
+ WMI_SOC_SET_PCL_CMDID = WMI_TLV_CMD(WMI_GRP_SOC),
+ WMI_SOC_SET_HW_MODE_CMDID,
+ WMI_SOC_SET_DUAL_MAC_CONFIG_CMDID,
+ WMI_SOC_SET_ANTENNA_MODE_CMDID,
+ WMI_PACKET_FILTER_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_PKT_FILTER),
+ WMI_PACKET_FILTER_ENABLE_CMDID,
+ WMI_MAWC_SENSOR_REPORT_IND_CMDID = WMI_TLV_CMD(WMI_GRP_MAWC),
+ WMI_PMF_OFFLOAD_SET_SA_QUERY_CMDID = WMI_TLV_CMD(WMI_GRP_PMF_OFFLOAD),
+ WMI_BPF_GET_CAPABILITY_CMDID = WMI_TLV_CMD(WMI_GRP_BPF_OFFLOAD),
+ WMI_BPF_GET_VDEV_STATS_CMDID,
+ WMI_BPF_SET_VDEV_INSTRUCTIONS_CMDID,
+ WMI_BPF_DEL_VDEV_INSTRUCTIONS_CMDID,
+ WMI_BPF_SET_VDEV_ACTIVE_MODE_CMDID,
+ WMI_MNT_FILTER_CMDID = WMI_TLV_CMD(WMI_GRP_MONITOR),
+ WMI_SET_CURRENT_COUNTRY_CMDID = WMI_TLV_CMD(WMI_GRP_REGULATORY),
+ WMI_11D_SCAN_START_CMDID,
+ WMI_11D_SCAN_STOP_CMDID,
+ WMI_SET_INIT_COUNTRY_CMDID,
+ WMI_NDI_GET_CAP_REQ_CMDID = WMI_TLV_CMD(WMI_GRP_PROTOTYPE),
+ WMI_NDP_INITIATOR_REQ_CMDID,
+ WMI_NDP_RESPONDER_REQ_CMDID,
+ WMI_NDP_END_REQ_CMDID,
+ WMI_HW_DATA_FILTER_CMDID = WMI_TLV_CMD(WMI_GRP_HW_DATA_FILTER),
+ WMI_TWT_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_TWT),
+ WMI_TWT_DISABLE_CMDID,
+ WMI_TWT_ADD_DIALOG_CMDID,
+ WMI_TWT_DEL_DIALOG_CMDID,
+ WMI_TWT_PAUSE_DIALOG_CMDID,
+ WMI_TWT_RESUME_DIALOG_CMDID,
+ WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID =
+  WMI_TLV_CMD(WMI_GRP_SPATIAL_REUSE),
+ WMI_PDEV_OBSS_PD_SPATIAL_REUSE_SET_DEF_OBSS_THRESH_CMDID,
+};
+
+enum wmi_tlv_event_id {
+ WMI_SERVICE_READY_EVENTID = 0x1,
+ WMI_READY_EVENTID,
+ WMI_SERVICE_AVAILABLE_EVENTID,
+ WMI_SCAN_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_SCAN),
+ WMI_PDEV_TPC_CONFIG_EVENTID = WMI_TLV_CMD(WMI_GRP_PDEV),
+ WMI_CHAN_INFO_EVENTID,
+ WMI_PHYERR_EVENTID,
+ WMI_PDEV_DUMP_EVENTID,
+ WMI_TX_PAUSE_EVENTID,
+ WMI_DFS_RADAR_EVENTID,
+ WMI_PDEV_L1SS_TRACK_EVENTID,
+ WMI_PDEV_TEMPERATURE_EVENTID,
+ WMI_SERVICE_READY_EXT_EVENTID,
+ WMI_PDEV_FIPS_EVENTID,
+ WMI_PDEV_CHANNEL_HOPPING_EVENTID,
+ WMI_PDEV_ANI_CCK_LEVEL_EVENTID,
+ WMI_PDEV_ANI_OFDM_LEVEL_EVENTID,
+ WMI_PDEV_TPC_EVENTID,
+ WMI_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID,
+ WMI_PDEV_SET_HW_MODE_RESP_EVENTID,
+ WMI_PDEV_HW_MODE_TRANSITION_EVENTID,
+ WMI_PDEV_SET_MAC_CONFIG_RESP_EVENTID,
+ WMI_PDEV_ANTDIV_STATUS_EVENTID,
+ WMI_PDEV_CHIP_POWER_STATS_EVENTID,
+ WMI_PDEV_CHIP_POWER_SAVE_FAILURE_DETECTED_EVENTID,
+ WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID,
+ WMI_PDEV_CHECK_CAL_VERSION_EVENTID,
+ WMI_PDEV_DIV_RSSI_ANTID_EVENTID,
+ WMI_PDEV_BSS_CHAN_INFO_EVENTID,
+ WMI_PDEV_UPDATE_CTLTABLE_EVENTID,
+ WMI_PDEV_DMA_RING_CFG_RSP_EVENTID,
+ WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID,
+ WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID,
+ WMI_PDEV_CSC_SWITCH_COUNT_STATUS_EVENTID,
+ WMI_PDEV_COLD_BOOT_CAL_DATA_EVENTID,
+ WMI_PDEV_RAP_INFO_EVENTID,
+ WMI_CHAN_RF_CHARACTERIZATION_INFO_EVENTID,
+ WMI_SERVICE_READY_EXT2_EVENTID,
+ WMI_VDEV_START_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_VDEV),
+ WMI_VDEV_STOPPED_EVENTID,
+ WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
+ WMI_VDEV_MCC_BCN_INTERVAL_CHANGE_REQ_EVENTID,
+ WMI_VDEV_TSF_REPORT_EVENTID,
+ WMI_VDEV_DELETE_RESP_EVENTID,
+ WMI_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENTID,
+ WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_STATUS_EVENTID,
+ WMI_PEER_STA_KICKOUT_EVENTID = WMI_TLV_CMD(WMI_GRP_PEER),
+ WMI_PEER_INFO_EVENTID,
+ WMI_PEER_TX_FAIL_CNT_THR_EVENTID,
+ WMI_PEER_ESTIMATED_LINKSPEED_EVENTID,
+ WMI_PEER_STATE_EVENTID,
+ WMI_PEER_ASSOC_CONF_EVENTID,
+ WMI_PEER_DELETE_RESP_EVENTID,
+ WMI_PEER_RATECODE_LIST_EVENTID,
+ WMI_WDS_PEER_EVENTID,
+ WMI_PEER_STA_PS_STATECHG_EVENTID,
+ WMI_PEER_ANTDIV_INFO_EVENTID,
+ WMI_PEER_RESERVED0_EVENTID,
+ WMI_PEER_RESERVED1_EVENTID,
+ WMI_PEER_RESERVED2_EVENTID,
+ WMI_PEER_RESERVED3_EVENTID,
+ WMI_PEER_RESERVED4_EVENTID,
+ WMI_PEER_RESERVED5_EVENTID,
+ WMI_PEER_RESERVED6_EVENTID,
+ WMI_PEER_RESERVED7_EVENTID,
+ WMI_PEER_RESERVED8_EVENTID,
+ WMI_PEER_RESERVED9_EVENTID,
+ WMI_PEER_RESERVED10_EVENTID,
+ WMI_PEER_OPER_MODE_CHANGE_EVENTID,
+ WMI_MGMT_RX_EVENTID = WMI_TLV_CMD(WMI_GRP_MGMT),
+ WMI_HOST_SWBA_EVENTID,
+ WMI_TBTTOFFSET_UPDATE_EVENTID,
+ WMI_OFFLOAD_BCN_TX_STATUS_EVENTID,
+ WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID,
+ WMI_MGMT_TX_COMPLETION_EVENTID,
+ WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID,
+ WMI_TBTTOFFSET_EXT_UPDATE_EVENTID,
+ WMI_OFFCHAN_DATA_TX_COMPLETION_EVENTID,
+ WMI_HOST_FILS_DISCOVERY_EVENTID,
+ WMI_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_CMD(WMI_GRP_BA_NEG),
+ WMI_TX_ADDBA_COMPLETE_EVENTID,
+ WMI_BA_RSP_SSN_EVENTID,
+ WMI_AGGR_STATE_TRIG_EVENTID,
+ WMI_ROAM_EVENTID = WMI_TLV_CMD(WMI_GRP_ROAM),
+ WMI_PROFILE_MATCH,
+ WMI_ROAM_SYNCH_EVENTID,
+ WMI_P2P_DISC_EVENTID = WMI_TLV_CMD(WMI_GRP_P2P),
+ WMI_P2P_NOA_EVENTID,
+ WMI_P2P_LISTEN_OFFLOAD_STOPPED_EVENTID,
+ WMI_AP_PS_EGAP_INFO_EVENTID = WMI_TLV_CMD(WMI_GRP_AP_PS),
+ WMI_PDEV_RESUME_EVENTID = WMI_TLV_CMD(WMI_GRP_SUSPEND),
+ WMI_WOW_WAKEUP_HOST_EVENTID = WMI_TLV_CMD(WMI_GRP_WOW),
+ WMI_D0_WOW_DISABLE_ACK_EVENTID,
+ WMI_WOW_INITIAL_WAKEUP_EVENTID,
+ WMI_RTT_MEASUREMENT_REPORT_EVENTID = WMI_TLV_CMD(WMI_GRP_RTT),
+ WMI_TSF_MEASUREMENT_REPORT_EVENTID,
+ WMI_RTT_ERROR_REPORT_EVENTID,
+ WMI_STATS_EXT_EVENTID = WMI_TLV_CMD(WMI_GRP_STATS),
+ WMI_IFACE_LINK_STATS_EVENTID,
+ WMI_PEER_LINK_STATS_EVENTID,
+ WMI_RADIO_LINK_STATS_EVENTID,
+ WMI_UPDATE_FW_MEM_DUMP_EVENTID,
+ WMI_DIAG_EVENT_LOG_SUPPORTED_EVENTID,
+ WMI_INST_RSSI_STATS_EVENTID,
+ WMI_RADIO_TX_POWER_LEVEL_STATS_EVENTID,
+ WMI_REPORT_STATS_EVENTID,
+ WMI_UPDATE_RCPI_EVENTID,
+ WMI_PEER_STATS_INFO_EVENTID,
+ WMI_RADIO_CHAN_STATS_EVENTID,
+ WMI_NLO_MATCH_EVENTID = WMI_TLV_CMD(WMI_GRP_NLO_OFL),
+ WMI_NLO_SCAN_COMPLETE_EVENTID,
+ WMI_APFIND_EVENTID,
+ WMI_PASSPOINT_MATCH_EVENTID,
+ WMI_GTK_OFFLOAD_STATUS_EVENTID = WMI_TLV_CMD(WMI_GRP_GTK_OFL),
+ WMI_GTK_REKEY_FAIL_EVENTID,
+ WMI_CSA_HANDLING_EVENTID = WMI_TLV_CMD(WMI_GRP_CSA_OFL),
+ WMI_CHATTER_PC_QUERY_EVENTID = WMI_TLV_CMD(WMI_GRP_CHATTER),
+ WMI_PDEV_DFS_RADAR_DETECTION_EVENTID = WMI_TLV_CMD(WMI_GRP_DFS),
+ WMI_VDEV_DFS_CAC_COMPLETE_EVENTID,
+ WMI_VDEV_ADFS_OCAC_COMPLETE_EVENTID,
+ WMI_ECHO_EVENTID = WMI_TLV_CMD(WMI_GRP_MISC),
+ WMI_PDEV_UTF_EVENTID,
+ WMI_DEBUG_MESG_EVENTID,
+ WMI_UPDATE_STATS_EVENTID,
+ WMI_DEBUG_PRINT_EVENTID,
+ WMI_DCS_INTERFERENCE_EVENTID,
+ WMI_PDEV_QVIT_EVENTID,
+ WMI_WLAN_PROFILE_DATA_EVENTID,
+ WMI_PDEV_FTM_INTG_EVENTID,
+ WMI_WLAN_FREQ_AVOID_EVENTID,
+ WMI_VDEV_GET_KEEPALIVE_EVENTID,
+ WMI_THERMAL_MGMT_EVENTID,
+ WMI_DIAG_DATA_CONTAINER_EVENTID,
+ WMI_HOST_AUTO_SHUTDOWN_EVENTID,
+ WMI_UPDATE_WHAL_MIB_STATS_EVENTID,
+ WMI_UPDATE_VDEV_RATE_STATS_EVENTID,
+ WMI_DIAG_EVENTID,
+ WMI_OCB_SET_SCHED_EVENTID,
+ WMI_DEBUG_MESG_FLUSH_COMPLETE_EVENTID,
+ WMI_RSSI_BREACH_EVENTID,
+ WMI_TRANSFER_DATA_TO_FLASH_COMPLETE_EVENTID,
+ WMI_PDEV_UTF_SCPC_EVENTID,
+ WMI_READ_DATA_FROM_FLASH_EVENTID,
+ WMI_REPORT_RX_AGGR_FAILURE_EVENTID,
+ WMI_PKGID_EVENTID,
+ WMI_GPIO_INPUT_EVENTID = WMI_TLV_CMD(WMI_GRP_GPIO),
+ WMI_UPLOADH_EVENTID,
+ WMI_CAPTUREH_EVENTID,
+ WMI_RFKILL_STATE_CHANGE_EVENTID,
+ WMI_TDLS_PEER_EVENTID = WMI_TLV_CMD(WMI_GRP_TDLS),
+ WMI_STA_SMPS_FORCE_MODE_COMPL_EVENTID = WMI_TLV_CMD(WMI_GRP_STA_SMPS),
+ WMI_BATCH_SCAN_ENABLED_EVENTID = WMI_TLV_CMD(WMI_GRP_LOCATION_SCAN),
+ WMI_BATCH_SCAN_RESULT_EVENTID,
+ WMI_OEM_CAPABILITY_EVENTID = WMI_TLV_CMD(WMI_GRP_OEM),
+ WMI_OEM_MEASUREMENT_REPORT_EVENTID,
+ WMI_OEM_ERROR_REPORT_EVENTID,
+ WMI_OEM_RESPONSE_EVENTID,
+ WMI_NAN_EVENTID = WMI_TLV_CMD(WMI_GRP_NAN),
+ WMI_NAN_DISC_IFACE_CREATED_EVENTID,
+ WMI_NAN_DISC_IFACE_DELETED_EVENTID,
+ WMI_NAN_STARTED_CLUSTER_EVENTID,
+ WMI_NAN_JOINED_CLUSTER_EVENTID,
+ WMI_COEX_REPORT_ANTENNA_ISOLATION_EVENTID = WMI_TLV_CMD(WMI_GRP_COEX),
+ WMI_LPI_RESULT_EVENTID = WMI_TLV_CMD(WMI_GRP_LPI),
+ WMI_LPI_STATUS_EVENTID,
+ WMI_LPI_HANDOFF_EVENTID,
+ WMI_EXTSCAN_START_STOP_EVENTID = WMI_TLV_CMD(WMI_GRP_EXTSCAN),
+ WMI_EXTSCAN_OPERATION_EVENTID,
+ WMI_EXTSCAN_TABLE_USAGE_EVENTID,
+ WMI_EXTSCAN_CACHED_RESULTS_EVENTID,
+ WMI_EXTSCAN_WLAN_CHANGE_RESULTS_EVENTID,
+ WMI_EXTSCAN_HOTLIST_MATCH_EVENTID,
+ WMI_EXTSCAN_CAPABILITIES_EVENTID,
+ WMI_EXTSCAN_HOTLIST_SSID_MATCH_EVENTID,
+ WMI_MDNS_STATS_EVENTID = WMI_TLV_CMD(WMI_GRP_MDNS_OFL),
+ WMI_SAP_OFL_ADD_STA_EVENTID = WMI_TLV_CMD(WMI_GRP_SAP_OFL),
+ WMI_SAP_OFL_DEL_STA_EVENTID,
+ WMI_OCB_SET_CONFIG_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_OCB),
+ WMI_OCB_GET_TSF_TIMER_RESP_EVENTID,
+ WMI_DCC_GET_STATS_RESP_EVENTID,
+ WMI_DCC_UPDATE_NDL_RESP_EVENTID,
+ WMI_DCC_STATS_EVENTID,
+ WMI_SOC_SET_HW_MODE_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_SOC),
+ WMI_SOC_HW_MODE_TRANSITION_EVENTID,
+ WMI_SOC_SET_DUAL_MAC_CONFIG_RESP_EVENTID,
+ WMI_MAWC_ENABLE_SENSOR_EVENTID = WMI_TLV_CMD(WMI_GRP_MAWC),
+ WMI_BPF_CAPABILIY_INFO_EVENTID = WMI_TLV_CMD(WMI_GRP_BPF_OFFLOAD),
+ WMI_BPF_VDEV_STATS_INFO_EVENTID,
+ WMI_RMC_NEW_LEADER_EVENTID = WMI_TLV_CMD(WMI_GRP_RMC),
+ WMI_REG_CHAN_LIST_CC_EVENTID = WMI_TLV_CMD(WMI_GRP_REGULATORY),
+ WMI_11D_NEW_COUNTRY_EVENTID,
+ WMI_REG_CHAN_LIST_CC_EXT_EVENTID,
+ WMI_NDI_CAP_RSP_EVENTID = WMI_TLV_CMD(WMI_GRP_PROTOTYPE),
+ WMI_NDP_INITIATOR_RSP_EVENTID,
+ WMI_NDP_RESPONDER_RSP_EVENTID,
+ WMI_NDP_END_RSP_EVENTID,
+ WMI_NDP_INDICATION_EVENTID,
+ WMI_NDP_CONFIRM_EVENTID,
+ WMI_NDP_END_INDICATION_EVENTID,
+
+ WMI_TWT_ENABLE_EVENTID = WMI_TLV_CMD(WMI_GRP_TWT),
+ WMI_TWT_DISABLE_EVENTID,
+ WMI_TWT_ADD_DIALOG_EVENTID,
+ WMI_TWT_DEL_DIALOG_EVENTID,
+ WMI_TWT_PAUSE_DIALOG_EVENTID,
+ WMI_TWT_RESUME_DIALOG_EVENTID,
+};
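[editor's note] Event IDs live in the same per-group 12-bit windows as commands (WMI_EVT_GRP_START_ID expands to the same value as WMI_CMD_GRP), so a receive path can route any event to a per-group handler with one shift. A hedged sketch; the dispatch table and handler type are purely hypothetical:

    /* Hypothetical per-group dispatch; only the shift mirrors the header. */
    typedef void (*grp_handler_t)(unsigned int event_id, const void *buf);

    static grp_handler_t handlers[0x41]; /* WMI_GRP_SPATIAL_REUSE == 0x40 */

    static void dispatch_event(unsigned int event_id, const void *buf)
    {
            unsigned int grp = event_id >> 12; /* inverse of the << 12 above */

            if (grp < 0x41 && handlers[grp])
                    handlers[grp](event_id, buf);
    }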
+
+enum wmi_tlv_pdev_param {
+ WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+ WMI_PDEV_PARAM_RX_CHAIN_MASK,
+ WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
+ WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
+ WMI_PDEV_PARAM_TXPOWER_SCALE,
+ WMI_PDEV_PARAM_BEACON_GEN_MODE,
+ WMI_PDEV_PARAM_BEACON_TX_MODE,
+ WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ WMI_PDEV_PARAM_PROTECTION_MODE,
+ WMI_PDEV_PARAM_DYNAMIC_BW,
+ WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
+ WMI_PDEV_PARAM_STA_KICKOUT_TH,
+ WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ WMI_PDEV_PARAM_LTR_ENABLE,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
+ WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ WMI_PDEV_PARAM_L1SS_ENABLE,
+ WMI_PDEV_PARAM_DSLEEP_ENABLE,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_PMF_QOS,
+ WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+ WMI_PDEV_PARAM_DCS,
+ WMI_PDEV_PARAM_ANI_ENABLE,
+ WMI_PDEV_PARAM_ANI_POLL_PERIOD,
+ WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
+ WMI_PDEV_PARAM_ANI_CCK_LEVEL,
+ WMI_PDEV_PARAM_DYNTXCHAIN,
+ WMI_PDEV_PARAM_PROXY_STA,
+ WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+ WMI_PDEV_PARAM_POWER_GATING_SLEEP,
+ WMI_PDEV_PARAM_RFKILL_ENABLE,
+ WMI_PDEV_PARAM_BURST_DUR,
+ WMI_PDEV_PARAM_BURST_ENABLE,
+ WMI_PDEV_PARAM_HW_RFKILL_CONFIG,
+ WMI_PDEV_PARAM_LOW_POWER_RF_ENABLE,
+ WMI_PDEV_PARAM_L1SS_TRACK,
+ WMI_PDEV_PARAM_HYST_EN,
+ WMI_PDEV_PARAM_POWER_COLLAPSE_ENABLE,
+ WMI_PDEV_PARAM_LED_SYS_STATE,
+ WMI_PDEV_PARAM_LED_ENABLE,
+ WMI_PDEV_PARAM_AUDIO_OVER_WLAN_LATENCY,
+ WMI_PDEV_PARAM_AUDIO_OVER_WLAN_ENABLE,
+ WMI_PDEV_PARAM_WHAL_MIB_STATS_UPDATE_ENABLE,
+ WMI_PDEV_PARAM_VDEV_RATE_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_CTS_CBW,
+ WMI_PDEV_PARAM_WNTS_CONFIG,
+ WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_ENABLE,
+ WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_MIN_SLEEP_SLOP,
+ WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_INC_DEC_STEP,
+ WMI_PDEV_PARAM_EARLY_RX_FIX_SLEEP_SLOP,
+ WMI_PDEV_PARAM_BMISS_BASED_ADAPTIVE_BTO_ENABLE,
+ WMI_PDEV_PARAM_BMISS_BTO_MIN_BCN_TIMEOUT,
+ WMI_PDEV_PARAM_BMISS_BTO_INC_DEC_STEP,
+ WMI_PDEV_PARAM_BTO_FIX_BCN_TIMEOUT,
+ WMI_PDEV_PARAM_CE_BASED_ADAPTIVE_BTO_ENABLE,
+ WMI_PDEV_PARAM_CE_BTO_COMBO_CE_VALUE,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_2G,
+ WMI_PDEV_PARAM_RX_CHAIN_MASK_2G,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_5G,
+ WMI_PDEV_PARAM_RX_CHAIN_MASK_5G,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_CCK,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_1SS,
+ WMI_PDEV_PARAM_CTS2SELF_FOR_P2P_GO_CONFIG,
+ WMI_PDEV_PARAM_TXPOWER_DECR_DB,
+ WMI_PDEV_PARAM_AGGR_BURST,
+ WMI_PDEV_PARAM_RX_DECAP_MODE,
+ WMI_PDEV_PARAM_FAST_CHANNEL_RESET,
+ WMI_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+ WMI_PDEV_PARAM_ANTENNA_GAIN,
+ WMI_PDEV_PARAM_RX_FILTER,
+ WMI_PDEV_SET_MCAST_TO_UCAST_TID,
+ WMI_PDEV_PARAM_PROXY_STA_MODE,
+ WMI_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+ WMI_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+ WMI_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+ WMI_PDEV_PEER_STA_PS_STATECHG_ENABLE,
+ WMI_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
+ WMI_PDEV_PARAM_BLOCK_INTERBSS,
+ WMI_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
+ WMI_PDEV_PARAM_SET_MSDU_TTL_CMDID,
+ WMI_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
+ WMI_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
+ WMI_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+ WMI_PDEV_PARAM_SET_BURST_MODE_CMDID,
+ WMI_PDEV_PARAM_EN_STATS,
+ WMI_PDEV_PARAM_MU_GROUP_POLICY,
+ WMI_PDEV_PARAM_NOISE_DETECTION,
+ WMI_PDEV_PARAM_NOISE_THRESHOLD,
+ WMI_PDEV_PARAM_DPD_ENABLE,
+ WMI_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
+ WMI_PDEV_PARAM_ATF_STRICT_SCH,
+ WMI_PDEV_PARAM_ATF_SCHED_DURATION,
+ WMI_PDEV_PARAM_ANT_PLZN,
+ WMI_PDEV_PARAM_MGMT_RETRY_LIMIT,
+ WMI_PDEV_PARAM_SENSITIVITY_LEVEL,
+ WMI_PDEV_PARAM_SIGNED_TXPOWER_2G,
+ WMI_PDEV_PARAM_SIGNED_TXPOWER_5G,
+ WMI_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
+ WMI_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
+ WMI_PDEV_PARAM_CCA_THRESHOLD,
+ WMI_PDEV_PARAM_RTS_FIXED_RATE,
+ WMI_PDEV_PARAM_PDEV_RESET,
+ WMI_PDEV_PARAM_WAPI_MBSSID_OFFSET,
+ WMI_PDEV_PARAM_ARP_DBG_SRCADDR,
+ WMI_PDEV_PARAM_ARP_DBG_DSTADDR,
+ WMI_PDEV_PARAM_ATF_OBSS_NOISE_SCH,
+ WMI_PDEV_PARAM_ATF_OBSS_NOISE_SCALING_FACTOR,
+ WMI_PDEV_PARAM_CUST_TXPOWER_SCALE,
+ WMI_PDEV_PARAM_ATF_DYNAMIC_ENABLE,
+ WMI_PDEV_PARAM_CTRL_RETRY_LIMIT,
+ WMI_PDEV_PARAM_PROPAGATION_DELAY,
+ WMI_PDEV_PARAM_ENA_ANT_DIV,
+ WMI_PDEV_PARAM_FORCE_CHAIN_ANT,
+ WMI_PDEV_PARAM_ANT_DIV_SELFTEST,
+ WMI_PDEV_PARAM_ANT_DIV_SELFTEST_INTVL,
+ WMI_PDEV_PARAM_STATS_OBSERVATION_PERIOD,
+ WMI_PDEV_PARAM_TX_PPDU_DELAY_BIN_SIZE_MS,
+ WMI_PDEV_PARAM_TX_PPDU_DELAY_ARRAY_LEN,
+ WMI_PDEV_PARAM_TX_MPDU_AGGR_ARRAY_LEN,
+ WMI_PDEV_PARAM_RX_MPDU_AGGR_ARRAY_LEN,
+ WMI_PDEV_PARAM_TX_SCH_DELAY,
+ WMI_PDEV_PARAM_ENABLE_RTS_SIFS_BURSTING,
+ WMI_PDEV_PARAM_MAX_MPDUS_IN_AMPDU,
+ WMI_PDEV_PARAM_PEER_STATS_INFO_ENABLE,
+ WMI_PDEV_PARAM_FAST_PWR_TRANSITION,
+ WMI_PDEV_PARAM_RADIO_CHAN_STATS_ENABLE,
+ WMI_PDEV_PARAM_RADIO_DIAGNOSIS_ENABLE,
+ WMI_PDEV_PARAM_MESH_MCAST_ENABLE,
+};
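[editor's note] Almost every member of wmi_tlv_pdev_param relies on implicit increment from the WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1 anchor, so the numeric values are firmware ABI: inserting a member mid-enum silently renumbers everything after it. A compile-time guard of the kind sketched below can pin a few anchors (the asserted values follow from counting the enum, they are not quoted from the patch):

    /* Illustrative guard; C11 _Static_assert. */
    _Static_assert(WMI_PDEV_PARAM_TX_CHAIN_MASK == 0x1,
                   "pdev param numbering is firmware ABI");
    _Static_assert(WMI_PDEV_PARAM_RX_CHAIN_MASK == 0x2,
                   "implicit increment from the 0x1 anchor");
    _Static_assert(WMI_PDEV_PARAM_TXPOWER_LIMIT2G == 0x3,
                   "do not insert members mid-enum");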
+
+enum wmi_tlv_vdev_param {
+ WMI_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+ WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ WMI_VDEV_PARAM_BEACON_INTERVAL,
+ WMI_VDEV_PARAM_LISTEN_INTERVAL,
+ WMI_VDEV_PARAM_MULTICAST_RATE,
+ WMI_VDEV_PARAM_MGMT_TX_RATE,
+ WMI_VDEV_PARAM_SLOT_TIME,
+ WMI_VDEV_PARAM_PREAMBLE,
+ WMI_VDEV_PARAM_SWBA_TIME,
+ WMI_VDEV_STATS_UPDATE_PERIOD,
+ WMI_VDEV_PWRSAVE_AGEOUT_TIME,
+ WMI_VDEV_HOST_SWBA_INTERVAL,
+ WMI_VDEV_PARAM_DTIM_PERIOD,
+ WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ WMI_VDEV_PARAM_WDS,
+ WMI_VDEV_PARAM_ATIM_WINDOW,
+ WMI_VDEV_PARAM_BMISS_COUNT_MAX,
+ WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
+ WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
+ WMI_VDEV_PARAM_FEATURE_WMM,
+ WMI_VDEV_PARAM_CHWIDTH,
+ WMI_VDEV_PARAM_CHEXTOFFSET,
+ WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
+ WMI_VDEV_PARAM_STA_QUICKKICKOUT,
+ WMI_VDEV_PARAM_MGMT_RATE,
+ WMI_VDEV_PARAM_PROTECTION_MODE,
+ WMI_VDEV_PARAM_FIXED_RATE,
+ WMI_VDEV_PARAM_SGI,
+ WMI_VDEV_PARAM_LDPC,
+ WMI_VDEV_PARAM_TX_STBC,
+ WMI_VDEV_PARAM_RX_STBC,
+ WMI_VDEV_PARAM_INTRA_BSS_FWD,
+ WMI_VDEV_PARAM_DEF_KEYID,
+ WMI_VDEV_PARAM_NSS,
+ WMI_VDEV_PARAM_BCAST_DATA_RATE,
+ WMI_VDEV_PARAM_MCAST_DATA_RATE,
+ WMI_VDEV_PARAM_MCAST_INDICATE,
+ WMI_VDEV_PARAM_DHCP_INDICATE,
+ WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
+ WMI_VDEV_PARAM_ENABLE_RTSCTS,
+ WMI_VDEV_PARAM_TXBF,
+ WMI_VDEV_PARAM_PACKET_POWERSAVE,
+ WMI_VDEV_PARAM_DROP_UNENCRY,
+ WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+ WMI_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+ WMI_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+ WMI_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+ WMI_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+ WMI_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+ WMI_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+ WMI_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+ WMI_VDEV_PARAM_TX_PWRLIMIT,
+ WMI_VDEV_PARAM_SNR_NUM_FOR_CAL,
+ WMI_VDEV_PARAM_ROAM_FW_OFFLOAD,
+ WMI_VDEV_PARAM_ENABLE_RMC,
+ WMI_VDEV_PARAM_IBSS_MAX_BCN_LOST_MS,
+ WMI_VDEV_PARAM_MAX_RATE,
+ WMI_VDEV_PARAM_EARLY_RX_DRIFT_SAMPLE,
+ WMI_VDEV_PARAM_SET_IBSS_TX_FAIL_CNT_THR,
+ WMI_VDEV_PARAM_EBT_RESYNC_TIMEOUT,
+ WMI_VDEV_PARAM_AGGR_TRIG_EVENT_ENABLE,
+ WMI_VDEV_PARAM_IS_IBSS_POWER_SAVE_ALLOWED,
+ WMI_VDEV_PARAM_IS_POWER_COLLAPSE_ALLOWED,
+ WMI_VDEV_PARAM_IS_AWAKE_ON_TXRX_ENABLED,
+ WMI_VDEV_PARAM_INACTIVITY_CNT,
+ WMI_VDEV_PARAM_TXSP_END_INACTIVITY_TIME_MS,
+ WMI_VDEV_PARAM_DTIM_POLICY,
+ WMI_VDEV_PARAM_IBSS_PS_WARMUP_TIME_SECS,
+ WMI_VDEV_PARAM_IBSS_PS_1RX_CHAIN_IN_ATIM_WINDOW_ENABLE,
+ WMI_VDEV_PARAM_RX_LEAK_WINDOW,
+ WMI_VDEV_PARAM_STATS_AVG_FACTOR,
+ WMI_VDEV_PARAM_DISCONNECT_TH,
+ WMI_VDEV_PARAM_RTSCTS_RATE,
+ WMI_VDEV_PARAM_MCC_RTSCTS_PROTECTION_ENABLE,
+ WMI_VDEV_PARAM_MCC_BROADCAST_PROBE_ENABLE,
+ WMI_VDEV_PARAM_TXPOWER_SCALE,
+ WMI_VDEV_PARAM_TXPOWER_SCALE_DECR_DB,
+ WMI_VDEV_PARAM_MCAST2UCAST_SET,
+ WMI_VDEV_PARAM_RC_NUM_RETRIES,
+ WMI_VDEV_PARAM_CABQ_MAXDUR,
+ WMI_VDEV_PARAM_MFPTEST_SET,
+ WMI_VDEV_PARAM_RTS_FIXED_RATE,
+ WMI_VDEV_PARAM_VHT_SGIMASK,
+ WMI_VDEV_PARAM_VHT80_RATEMASK,
+ WMI_VDEV_PARAM_PROXY_STA,
+ WMI_VDEV_PARAM_VIRTUAL_CELL_MODE,
+ WMI_VDEV_PARAM_RX_DECAP_TYPE,
+ WMI_VDEV_PARAM_BW_NSS_RATEMASK,
+ WMI_VDEV_PARAM_SENSOR_AP,
+ WMI_VDEV_PARAM_BEACON_RATE,
+ WMI_VDEV_PARAM_DTIM_ENABLE_CTS,
+ WMI_VDEV_PARAM_STA_KICKOUT,
+ WMI_VDEV_PARAM_CAPABILITIES,
+ WMI_VDEV_PARAM_TSF_INCREMENT,
+ WMI_VDEV_PARAM_AMPDU_PER_AC,
+ WMI_VDEV_PARAM_RX_FILTER,
+ WMI_VDEV_PARAM_MGMT_TX_POWER,
+ WMI_VDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ WMI_VDEV_PARAM_AGG_SW_RETRY_TH,
+ WMI_VDEV_PARAM_DISABLE_DYN_BW_RTS,
+ WMI_VDEV_PARAM_ATF_SSID_SCHED_POLICY,
+ WMI_VDEV_PARAM_HE_DCM,
+ WMI_VDEV_PARAM_HE_RANGE_EXT,
+ WMI_VDEV_PARAM_ENABLE_BCAST_PROBE_RESPONSE,
+ WMI_VDEV_PARAM_FILS_MAX_CHANNEL_GUARD_TIME,
+ WMI_VDEV_PARAM_BA_MODE = 0x7e,
+ WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE = 0x87,
+ WMI_VDEV_PARAM_6GHZ_PARAMS = 0x99,
+ WMI_VDEV_PARAM_PROTOTYPE = 0x8000,
+ WMI_VDEV_PARAM_BSS_COLOR,
+ WMI_VDEV_PARAM_SET_HEMU_MODE,
+ WMI_VDEV_PARAM_HEOPS_0_31 = 0x8003,
+};
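[editor's note] The vdev list re-pins several members explicitly (0x7e, 0x87, 0x99) and parks prototype parameters in a separate window starting at WMI_VDEV_PARAM_PROTOTYPE = 0x8000, so the firmware numbering has deliberate gaps. A tiny illustrative classifier for the two windows (the helper name is hypothetical, not from the patch):

    #include <stdbool.h>

    /* Hypothetical: standard vdev params sit below 0x8000,
     * prototype params at 0x8000 and above. */
    static bool wmi_vdev_param_is_prototype(unsigned int param)
    {
            return param >= 0x8000;
    }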
+
+enum wmi_tlv_peer_flags {
+ WMI_TLV_PEER_AUTH = 0x00000001,
+ WMI_TLV_PEER_QOS = 0x00000002,
+ WMI_TLV_PEER_NEED_PTK_4_WAY = 0x00000004,
+ WMI_TLV_PEER_NEED_GTK_2_WAY = 0x00000010,
+ WMI_TLV_PEER_APSD = 0x00000800,
+ WMI_TLV_PEER_HT = 0x00001000,
+ WMI_TLV_PEER_40MHZ = 0x00002000,
+ WMI_TLV_PEER_STBC = 0x00008000,
+ WMI_TLV_PEER_LDPC = 0x00010000,
+ WMI_TLV_PEER_DYN_MIMOPS = 0x00020000,
+ WMI_TLV_PEER_STATIC_MIMOPS = 0x00040000,
+ WMI_TLV_PEER_SPATIAL_MUX = 0x00200000,
+ WMI_TLV_PEER_VHT = 0x02000000,
+ WMI_TLV_PEER_80MHZ = 0x04000000,
+ WMI_TLV_PEER_PMF = 0x08000000,
+ WMI_PEER_IS_P2P_CAPABLE = 0x20000000,
+ WMI_PEER_160MHZ = 0x40000000,
+ WMI_PEER_SAFEMODE_EN = 0x80000000,
+
+};
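[editor's note] Unlike the parameter enums, wmi_tlv_peer_flags values are independent bits in a single 32-bit word, ORed together in a peer-assoc payload; WMI_PEER_SAFEMODE_EN occupies bit 31, which is why an unsigned type is needed. A hedged sketch using the flag names defined above:

    #include <stdint.h>

    /* Illustrative flag word for an authorized QoS HT/40 MHz peer. */
    static uint32_t example_peer_flags(void)
    {
            uint32_t flags = 0;

            flags |= WMI_TLV_PEER_AUTH;   /* 0x00000001 */
            flags |= WMI_TLV_PEER_QOS;    /* 0x00000002 */
            flags |= WMI_TLV_PEER_HT;     /* 0x00001000 */
            flags |= WMI_TLV_PEER_40MHZ;  /* 0x00002000 */

            return flags;
    }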
+
+/** Enum list of TLV Tags for each parameter structure type. */
+enum wmi_tlv_tag {
+ WMI_TAG_LAST_RESERVED = 15,
+ WMI_TAG_FIRST_ARRAY_ENUM,
+ WMI_TAG_ARRAY_UINT32 = WMI_TAG_FIRST_ARRAY_ENUM,
+ WMI_TAG_ARRAY_BYTE,
+ WMI_TAG_ARRAY_STRUCT,
+ WMI_TAG_ARRAY_FIXED_STRUCT,
+ WMI_TAG_LAST_ARRAY_ENUM = 31,
+ WMI_TAG_SERVICE_READY_EVENT,
+ WMI_TAG_HAL_REG_CAPABILITIES,
+ WMI_TAG_WLAN_HOST_MEM_REQ,
+ WMI_TAG_READY_EVENT,
+ WMI_TAG_SCAN_EVENT,
+ WMI_TAG_PDEV_TPC_CONFIG_EVENT,
+ WMI_TAG_CHAN_INFO_EVENT,
+ WMI_TAG_COMB_PHYERR_RX_HDR,
+ WMI_TAG_VDEV_START_RESPONSE_EVENT,
+ WMI_TAG_VDEV_STOPPED_EVENT,
+ WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT,
+ WMI_TAG_PEER_STA_KICKOUT_EVENT,
+ WMI_TAG_MGMT_RX_HDR,
+ WMI_TAG_TBTT_OFFSET_EVENT,
+ WMI_TAG_TX_DELBA_COMPLETE_EVENT,
+ WMI_TAG_TX_ADDBA_COMPLETE_EVENT,
+ WMI_TAG_ROAM_EVENT,
+ WMI_TAG_WOW_EVENT_INFO,
+ WMI_TAG_WOW_EVENT_INFO_SECTION_BITMAP,
+ WMI_TAG_RTT_EVENT_HEADER,
+ WMI_TAG_RTT_ERROR_REPORT_EVENT,
+ WMI_TAG_RTT_MEAS_EVENT,
+ WMI_TAG_ECHO_EVENT,
+ WMI_TAG_FTM_INTG_EVENT,
+ WMI_TAG_VDEV_GET_KEEPALIVE_EVENT,
+ WMI_TAG_GPIO_INPUT_EVENT,
+ WMI_TAG_CSA_EVENT,
+ WMI_TAG_GTK_OFFLOAD_STATUS_EVENT,
+ WMI_TAG_IGTK_INFO,
+ WMI_TAG_DCS_INTERFERENCE_EVENT,
+ WMI_TAG_ATH_DCS_CW_INT,
+ WMI_TAG_WLAN_DCS_CW_INT = /* ALIAS */
+  WMI_TAG_ATH_DCS_CW_INT,
+ WMI_TAG_ATH_DCS_WLAN_INT_STAT,
+ WMI_TAG_WLAN_DCS_IM_TGT_STATS_T = /* ALIAS */
+  WMI_TAG_ATH_DCS_WLAN_INT_STAT,
+ WMI_TAG_WLAN_PROFILE_CTX_T,
+ WMI_TAG_WLAN_PROFILE_T,
+ WMI_TAG_PDEV_QVIT_EVENT,
+ WMI_TAG_HOST_SWBA_EVENT,
+ WMI_TAG_TIM_INFO,
+ WMI_TAG_P2P_NOA_INFO,
+ WMI_TAG_STATS_EVENT,
+ WMI_TAG_AVOID_FREQ_RANGES_EVENT,
+ WMI_TAG_AVOID_FREQ_RANGE_DESC,
+ WMI_TAG_GTK_REKEY_FAIL_EVENT,
+ WMI_TAG_INIT_CMD,
+ WMI_TAG_RESOURCE_CONFIG,
+ WMI_TAG_WLAN_HOST_MEMORY_CHUNK,
+ WMI_TAG_START_SCAN_CMD,
+ WMI_TAG_STOP_SCAN_CMD,
+ WMI_TAG_SCAN_CHAN_LIST_CMD,
+ WMI_TAG_CHANNEL,
+ WMI_TAG_PDEV_SET_REGDOMAIN_CMD,
+ WMI_TAG_PDEV_SET_PARAM_CMD,
+ WMI_TAG_PDEV_SET_WMM_PARAMS_CMD,
+ WMI_TAG_WMM_PARAMS,
+ WMI_TAG_PDEV_SET_QUIET_CMD,
+ WMI_TAG_VDEV_CREATE_CMD,
+ WMI_TAG_VDEV_DELETE_CMD,
+ WMI_TAG_VDEV_START_REQUEST_CMD,
+ WMI_TAG_P2P_NOA_DESCRIPTOR,
+ WMI_TAG_P2P_GO_SET_BEACON_IE,
+ WMI_TAG_GTK_OFFLOAD_CMD,
+ WMI_TAG_VDEV_UP_CMD,
+ WMI_TAG_VDEV_STOP_CMD,
+ WMI_TAG_VDEV_DOWN_CMD,
+ WMI_TAG_VDEV_SET_PARAM_CMD,
+ WMI_TAG_VDEV_INSTALL_KEY_CMD,
+ WMI_TAG_PEER_CREATE_CMD,
+ WMI_TAG_PEER_DELETE_CMD,
+ WMI_TAG_PEER_FLUSH_TIDS_CMD,
+ WMI_TAG_PEER_SET_PARAM_CMD,
+ WMI_TAG_PEER_ASSOC_COMPLETE_CMD,
+ WMI_TAG_VHT_RATE_SET,
+ WMI_TAG_BCN_TMPL_CMD,
+ WMI_TAG_PRB_TMPL_CMD,
+ WMI_TAG_BCN_PRB_INFO,
+ WMI_TAG_PEER_TID_ADDBA_CMD,
+ WMI_TAG_PEER_TID_DELBA_CMD,
+ WMI_TAG_STA_POWERSAVE_MODE_CMD,
+ WMI_TAG_STA_POWERSAVE_PARAM_CMD,
+ WMI_TAG_STA_DTIM_PS_METHOD_CMD,
+ WMI_TAG_ROAM_SCAN_MODE,
+ WMI_TAG_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_TAG_ROAM_SCAN_PERIOD,
+ WMI_TAG_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_TAG_PDEV_SUSPEND_CMD,
+ WMI_TAG_PDEV_RESUME_CMD,
+ WMI_TAG_ADD_BCN_FILTER_CMD,
+ WMI_TAG_RMV_BCN_FILTER_CMD,
+ WMI_TAG_WOW_ENABLE_CMD,
+ WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
+ WMI_TAG_STA_UAPSD_AUTO_TRIG_CMD,
+ WMI_TAG_STA_UAPSD_AUTO_TRIG_PARAM,
+ WMI_TAG_SET_ARP_NS_OFFLOAD_CMD,
+ WMI_TAG_ARP_OFFLOAD_TUPLE,
+ WMI_TAG_NS_OFFLOAD_TUPLE,
+ WMI_TAG_FTM_INTG_CMD,
+ WMI_TAG_STA_KEEPALIVE_CMD,
+ WMI_TAG_STA_KEEPALVE_ARP_RESPONSE,
+ WMI_TAG_P2P_SET_VENDOR_IE_DATA_CMD,
+ WMI_TAG_AP_PS_PEER_CMD,
+ WMI_TAG_PEER_RATE_RETRY_SCHED_CMD,
+ WMI_TAG_WLAN_PROFILE_TRIGGER_CMD,
+ WMI_TAG_WLAN_PROFILE_SET_HIST_INTVL_CMD,
+ WMI_TAG_WLAN_PROFILE_GET_PROF_DATA_CMD,
+ WMI_TAG_WLAN_PROFILE_ENABLE_PROFILE_ID_CMD,
+ WMI_TAG_WOW_DEL_PATTERN_CMD,
+ WMI_TAG_WOW_ADD_DEL_EVT_CMD,
+ WMI_TAG_RTT_MEASREQ_HEAD,
+ WMI_TAG_RTT_MEASREQ_BODY,
+ WMI_TAG_RTT_TSF_CMD,
+ WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD,
+ WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD,
+ WMI_TAG_REQUEST_STATS_CMD,
+ WMI_TAG_NLO_CONFIG_CMD,
+ WMI_TAG_NLO_CONFIGURED_PARAMETERS,
+ WMI_TAG_CSA_OFFLOAD_ENABLE_CMD,
+ WMI_TAG_CSA_OFFLOAD_CHANSWITCH_CMD,
+ WMI_TAG_CHATTER_SET_MODE_CMD,
+ WMI_TAG_ECHO_CMD,
+ WMI_TAG_VDEV_SET_KEEPALIVE_CMD,
+ WMI_TAG_VDEV_GET_KEEPALIVE_CMD,
+ WMI_TAG_FORCE_FW_HANG_CMD,
+ WMI_TAG_GPIO_CONFIG_CMD,
+ WMI_TAG_GPIO_OUTPUT_CMD,
+ WMI_TAG_PEER_ADD_WDS_ENTRY_CMD,
+ WMI_TAG_PEER_REMOVE_WDS_ENTRY_CMD,
+ WMI_TAG_BCN_TX_HDR,
+ WMI_TAG_BCN_SEND_FROM_HOST_CMD,
+ WMI_TAG_MGMT_TX_HDR,
+ WMI_TAG_ADDBA_CLEAR_RESP_CMD,
+ WMI_TAG_ADDBA_SEND_CMD,
+ WMI_TAG_DELBA_SEND_CMD,
+ WMI_TAG_ADDBA_SETRESPONSE_CMD,
+ WMI_TAG_SEND_SINGLEAMSDU_CMD,
+ WMI_TAG_PDEV_PKTLOG_ENABLE_CMD,
+ WMI_TAG_PDEV_PKTLOG_DISABLE_CMD,
+ WMI_TAG_PDEV_SET_HT_IE_CMD,
+ WMI_TAG_PDEV_SET_VHT_IE_CMD,
+ WMI_TAG_PDEV_SET_DSCP_TID_MAP_CMD,
+ WMI_TAG_PDEV_GREEN_AP_PS_ENABLE_CMD,
+ WMI_TAG_PDEV_GET_TPC_CONFIG_CMD,
+ WMI_TAG_PDEV_SET_BASE_MACADDR_CMD,
+ WMI_TAG_PEER_MCAST_GROUP_CMD,
+ WMI_TAG_ROAM_AP_PROFILE,
+ WMI_TAG_AP_PROFILE,
+ WMI_TAG_SCAN_SCH_PRIORITY_TABLE_CMD,
+ WMI_TAG_PDEV_DFS_ENABLE_CMD,
+ WMI_TAG_PDEV_DFS_DISABLE_CMD,
+ WMI_TAG_WOW_ADD_PATTERN_CMD,
+ WMI_TAG_WOW_BITMAP_PATTERN_T,
+ WMI_TAG_WOW_IPV4_SYNC_PATTERN_T,
+ WMI_TAG_WOW_IPV6_SYNC_PATTERN_T,
+ WMI_TAG_WOW_MAGIC_PATTERN_CMD,
+ WMI_TAG_SCAN_UPDATE_REQUEST_CMD,
+ WMI_TAG_CHATTER_PKT_COALESCING_FILTER,
+ WMI_TAG_CHATTER_COALESCING_ADD_FILTER_CMD,
+ WMI_TAG_CHATTER_COALESCING_DELETE_FILTER_CMD,
+ WMI_TAG_CHATTER_COALESCING_QUERY_CMD,
+ WMI_TAG_TXBF_CMD,
+ WMI_TAG_DEBUG_LOG_CONFIG_CMD,
+ WMI_TAG_NLO_EVENT,
+ WMI_TAG_CHATTER_QUERY_REPLY_EVENT,
+ WMI_TAG_UPLOAD_H_HDR,
+ WMI_TAG_CAPTURE_H_EVENT_HDR,
+ WMI_TAG_VDEV_WNM_SLEEPMODE_CMD,
+ WMI_TAG_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMD,
+ WMI_TAG_VDEV_WMM_ADDTS_CMD,
+ WMI_TAG_VDEV_WMM_DELTS_CMD,
+ WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
+ WMI_TAG_TDLS_SET_STATE_CMD,
+ WMI_TAG_TDLS_PEER_UPDATE_CMD,
+ WMI_TAG_TDLS_PEER_EVENT,
+ WMI_TAG_TDLS_PEER_CAPABILITIES,
+ WMI_TAG_VDEV_MCC_SET_TBTT_MODE_CMD,
+ WMI_TAG_ROAM_CHAN_LIST,
+ WMI_TAG_VDEV_MCC_BCN_INTVL_CHANGE_EVENT,
+ WMI_TAG_RESMGR_ADAPTIVE_OCS_ENABLE_DISABLE_CMD,
+ WMI_TAG_RESMGR_SET_CHAN_TIME_QUOTA_CMD,
+ WMI_TAG_RESMGR_SET_CHAN_LATENCY_CMD,
+ WMI_TAG_BA_REQ_SSN_CMD,
+ WMI_TAG_BA_RSP_SSN_EVENT,
+ WMI_TAG_STA_SMPS_FORCE_MODE_CMD,
+ WMI_TAG_SET_MCASTBCAST_FILTER_CMD,
+ WMI_TAG_P2P_SET_OPPPS_CMD,
+ WMI_TAG_P2P_SET_NOA_CMD,
+ WMI_TAG_BA_REQ_SSN_CMD_SUB_STRUCT_PARAM,
+ WMI_TAG_BA_REQ_SSN_EVENT_SUB_STRUCT_PARAM,
+ WMI_TAG_STA_SMPS_PARAM_CMD,
+ WMI_TAG_VDEV_SET_GTX_PARAMS_CMD,
+ WMI_TAG_MCC_SCHED_TRAFFIC_STATS_CMD,
+ WMI_TAG_MCC_SCHED_STA_TRAFFIC_STATS,
+ WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT,
+ WMI_TAG_P2P_NOA_EVENT,
+ WMI_TAG_HB_SET_ENABLE_CMD,
+ WMI_TAG_HB_SET_TCP_PARAMS_CMD,
+ WMI_TAG_HB_SET_TCP_PKT_FILTER_CMD,
+ WMI_TAG_HB_SET_UDP_PARAMS_CMD,
+ WMI_TAG_HB_SET_UDP_PKT_FILTER_CMD,
+ WMI_TAG_HB_IND_EVENT,
+ WMI_TAG_TX_PAUSE_EVENT,
+ WMI_TAG_RFKILL_EVENT,
+ WMI_TAG_DFS_RADAR_EVENT,
+ WMI_TAG_DFS_PHYERR_FILTER_ENA_CMD,
+ WMI_TAG_DFS_PHYERR_FILTER_DIS_CMD,
+ WMI_TAG_BATCH_SCAN_RESULT_SCAN_LIST,
+ WMI_TAG_BATCH_SCAN_RESULT_NETWORK_INFO,
+ WMI_TAG_BATCH_SCAN_ENABLE_CMD,
+ WMI_TAG_BATCH_SCAN_DISABLE_CMD,
+ WMI_TAG_BATCH_SCAN_TRIGGER_RESULT_CMD,
+ WMI_TAG_BATCH_SCAN_ENABLED_EVENT,
+ WMI_TAG_BATCH_SCAN_RESULT_EVENT,
+ WMI_TAG_VDEV_PLMREQ_START_CMD,
+ WMI_TAG_VDEV_PLMREQ_STOP_CMD,
+ WMI_TAG_THERMAL_MGMT_CMD,
+ WMI_TAG_THERMAL_MGMT_EVENT,
+ WMI_TAG_PEER_INFO_REQ_CMD,
+ WMI_TAG_PEER_INFO_EVENT,
+ WMI_TAG_PEER_INFO,
+ WMI_TAG_PEER_TX_FAIL_CNT_THR_EVENT,
+ WMI_TAG_RMC_SET_MODE_CMD,
+ WMI_TAG_RMC_SET_ACTION_PERIOD_CMD,
+ WMI_TAG_RMC_CONFIG_CMD,
+ WMI_TAG_MHF_OFFLOAD_SET_MODE_CMD,
+ WMI_TAG_MHF_OFFLOAD_PLUMB_ROUTING_TABLE_CMD,
+ WMI_TAG_ADD_PROACTIVE_ARP_RSP_PATTERN_CMD,
+ WMI_TAG_DEL_PROACTIVE_ARP_RSP_PATTERN_CMD,
+ WMI_TAG_NAN_CMD_PARAM,
+ WMI_TAG_NAN_EVENT_HDR,
+ WMI_TAG_PDEV_L1SS_TRACK_EVENT,
+ WMI_TAG_DIAG_DATA_CONTAINER_EVENT,
+ WMI_TAG_MODEM_POWER_STATE_CMD_PARAM,
+ WMI_TAG_PEER_GET_ESTIMATED_LINKSPEED_CMD,
+ WMI_TAG_PEER_ESTIMATED_LINKSPEED_EVENT,
+ WMI_TAG_AGGR_STATE_TRIG_EVENT,
+ WMI_TAG_MHF_OFFLOAD_ROUTING_TABLE_ENTRY,
+ WMI_TAG_ROAM_SCAN_CMD,
+ WMI_TAG_REQ_STATS_EXT_CMD,
+ WMI_TAG_STATS_EXT_EVENT,
+ WMI_TAG_OBSS_SCAN_ENABLE_CMD,
+ WMI_TAG_OBSS_SCAN_DISABLE_CMD,
+ WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT,
+ WMI_TAG_PDEV_SET_LED_CONFIG_CMD,
+ WMI_TAG_HOST_AUTO_SHUTDOWN_CFG_CMD,
+ WMI_TAG_HOST_AUTO_SHUTDOWN_EVENT,
+ WMI_TAG_UPDATE_WHAL_MIB_STATS_EVENT,
+ WMI_TAG_CHAN_AVOID_UPDATE_CMD_PARAM,
+ WMI_TAG_WOW_IOAC_PKT_PATTERN_T,
+ WMI_TAG_WOW_IOAC_TMR_PATTERN_T,
+ WMI_TAG_WOW_IOAC_ADD_KEEPALIVE_CMD,
+ WMI_TAG_WOW_IOAC_DEL_KEEPALIVE_CMD,
+ WMI_TAG_WOW_IOAC_KEEPALIVE_T,
+ WMI_TAG_WOW_IOAC_ADD_PATTERN_CMD,
+ WMI_TAG_WOW_IOAC_DEL_PATTERN_CMD,
+ WMI_TAG_START_LINK_STATS_CMD,
+ WMI_TAG_CLEAR_LINK_STATS_CMD,
+ WMI_TAG_REQUEST_LINK_STATS_CMD,
+ WMI_TAG_IFACE_LINK_STATS_EVENT,
+ WMI_TAG_RADIO_LINK_STATS_EVENT,
+ WMI_TAG_PEER_STATS_EVENT,
+ WMI_TAG_CHANNEL_STATS,
+ WMI_TAG_RADIO_LINK_STATS,
+ WMI_TAG_RATE_STATS,
+ WMI_TAG_PEER_LINK_STATS,
+ WMI_TAG_WMM_AC_STATS,
+ WMI_TAG_IFACE_LINK_STATS,
+ WMI_TAG_LPI_MGMT_SNOOPING_CONFIG_CMD,
+ WMI_TAG_LPI_START_SCAN_CMD,
+ WMI_TAG_LPI_STOP_SCAN_CMD,
+ WMI_TAG_LPI_RESULT_EVENT,
+ WMI_TAG_PEER_STATE_EVENT,
+ WMI_TAG_EXTSCAN_BUCKET_CMD,
+ WMI_TAG_EXTSCAN_BUCKET_CHANNEL_EVENT,
+ WMI_TAG_EXTSCAN_START_CMD,
+ WMI_TAG_EXTSCAN_STOP_CMD,
+ WMI_TAG_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMD,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_BSSID_PARAM_CMD,
+ WMI_TAG_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMD,
+ WMI_TAG_EXTSCAN_GET_CACHED_RESULTS_CMD,
+ WMI_TAG_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMD,
+ WMI_TAG_EXTSCAN_SET_CAPABILITIES_CMD,
+ WMI_TAG_EXTSCAN_GET_CAPABILITIES_CMD,
+ WMI_TAG_EXTSCAN_OPERATION_EVENT,
+ WMI_TAG_EXTSCAN_START_STOP_EVENT,
+ WMI_TAG_EXTSCAN_TABLE_USAGE_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_DESCRIPTOR_EVENT,
+ WMI_TAG_EXTSCAN_RSSI_INFO_EVENT,
+ WMI_TAG_EXTSCAN_CACHED_RESULTS_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_RESULTS_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_RESULT_BSSID_EVENT,
+ WMI_TAG_EXTSCAN_HOTLIST_MATCH_EVENT,
+ WMI_TAG_EXTSCAN_CAPABILITIES_EVENT,
+ WMI_TAG_EXTSCAN_CACHE_CAPABILITIES_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_MONITOR_CAPABILITIES_EVENT,
+ WMI_TAG_EXTSCAN_HOTLIST_MONITOR_CAPABILITIES_EVENT,
+ WMI_TAG_D0_WOW_ENABLE_DISABLE_CMD,
+ WMI_TAG_D0_WOW_DISABLE_ACK_EVENT,
+ WMI_TAG_UNIT_TEST_CMD,
+ WMI_TAG_ROAM_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_11I_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_11R_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_ESE_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_SYNCH_EVENT,
+ WMI_TAG_ROAM_SYNCH_COMPLETE,
+ WMI_TAG_EXTWOW_ENABLE_CMD,
+ WMI_TAG_EXTWOW_SET_APP_TYPE1_PARAMS_CMD,
+ WMI_TAG_EXTWOW_SET_APP_TYPE2_PARAMS_CMD,
+ WMI_TAG_LPI_STATUS_EVENT,
+ WMI_TAG_LPI_HANDOFF_EVENT,
+ WMI_TAG_VDEV_RATE_STATS_EVENT,
+ WMI_TAG_VDEV_RATE_HT_INFO,
+ WMI_TAG_RIC_REQUEST,
+ WMI_TAG_PDEV_GET_TEMPERATURE_CMD,
+ WMI_TAG_PDEV_TEMPERATURE_EVENT,
+ WMI_TAG_SET_DHCP_SERVER_OFFLOAD_CMD,
+ WMI_TAG_TPC_CHAINMASK_CONFIG_CMD,
+ WMI_TAG_RIC_TSPEC,
+ WMI_TAG_TPC_CHAINMASK_CONFIG,
+ WMI_TAG_IPA_OFFLOAD_ENABLE_DISABLE_CMD,
+ WMI_TAG_SCAN_PROB_REQ_OUI_CMD,
+ WMI_TAG_KEY_MATERIAL,
+ WMI_TAG_TDLS_SET_OFFCHAN_MODE_CMD,
+ WMI_TAG_SET_LED_FLASHING_CMD,
+ WMI_TAG_MDNS_OFFLOAD_CMD,
+ WMI_TAG_MDNS_SET_FQDN_CMD,
+ WMI_TAG_MDNS_SET_RESP_CMD,
+ WMI_TAG_MDNS_GET_STATS_CMD,
+ WMI_TAG_MDNS_STATS_EVENT,
+ WMI_TAG_ROAM_INVOKE_CMD,
+ WMI_TAG_PDEV_RESUME_EVENT,
+ WMI_TAG_PDEV_SET_ANTENNA_DIVERSITY_CMD,
+ WMI_TAG_SAP_OFL_ENABLE_CMD,
+ WMI_TAG_SAP_OFL_ADD_STA_EVENT,
+ WMI_TAG_SAP_OFL_DEL_STA_EVENT,
+ WMI_TAG_APFIND_CMD_PARAM,
+ WMI_TAG_APFIND_EVENT_HDR,
+ WMI_TAG_OCB_SET_SCHED_CMD,
+ WMI_TAG_OCB_SET_SCHED_EVENT,
+ WMI_TAG_OCB_SET_CONFIG_CMD,
+ WMI_TAG_OCB_SET_CONFIG_RESP_EVENT,
+ WMI_TAG_OCB_SET_UTC_TIME_CMD,
+ WMI_TAG_OCB_START_TIMING_ADVERT_CMD,
+ WMI_TAG_OCB_STOP_TIMING_ADVERT_CMD,
+ WMI_TAG_OCB_GET_TSF_TIMER_CMD,
+ WMI_TAG_OCB_GET_TSF_TIMER_RESP_EVENT,
+ WMI_TAG_DCC_GET_STATS_CMD,
+ WMI_TAG_DCC_CHANNEL_STATS_REQUEST,
+ WMI_TAG_DCC_GET_STATS_RESP_EVENT,
+ WMI_TAG_DCC_CLEAR_STATS_CMD,
+ WMI_TAG_DCC_UPDATE_NDL_CMD,
+ WMI_TAG_DCC_UPDATE_NDL_RESP_EVENT,
+ WMI_TAG_DCC_STATS_EVENT,
+ WMI_TAG_OCB_CHANNEL,
+ WMI_TAG_OCB_SCHEDULE_ELEMENT,
+ WMI_TAG_DCC_NDL_STATS_PER_CHANNEL,
+ WMI_TAG_DCC_NDL_CHAN,
+ WMI_TAG_QOS_PARAMETER,
+ WMI_TAG_DCC_NDL_ACTIVE_STATE_CONFIG,
+ WMI_TAG_ROAM_SCAN_EXTENDED_THRESHOLD_PARAM,
+ WMI_TAG_ROAM_FILTER,
+ WMI_TAG_PASSPOINT_CONFIG_CMD,
+ WMI_TAG_PASSPOINT_EVENT_HDR,
+ WMI_TAG_EXTSCAN_CONFIGURE_HOTLIST_SSID_MONITOR_CMD,
+ WMI_TAG_EXTSCAN_HOTLIST_SSID_MATCH_EVENT,
+ WMI_TAG_VDEV_TSF_TSTAMP_ACTION_CMD,
+ WMI_TAG_VDEV_TSF_REPORT_EVENT,
+ WMI_TAG_GET_FW_MEM_DUMP,
+ WMI_TAG_UPDATE_FW_MEM_DUMP,
+ WMI_TAG_FW_MEM_DUMP_PARAMS,
+ WMI_TAG_DEBUG_MESG_FLUSH,
+ WMI_TAG_DEBUG_MESG_FLUSH_COMPLETE,
+ WMI_TAG_PEER_SET_RATE_REPORT_CONDITION,
+ WMI_TAG_ROAM_SUBNET_CHANGE_CONFIG,
+ WMI_TAG_VDEV_SET_IE_CMD,
+ WMI_TAG_RSSI_BREACH_MONITOR_CONFIG,
+ WMI_TAG_RSSI_BREACH_EVENT,
+ WMI_TAG_WOW_EVENT_INITIAL_WAKEUP,
+ WMI_TAG_SOC_SET_PCL_CMD,
+ WMI_TAG_SOC_SET_HW_MODE_CMD,
+ WMI_TAG_SOC_SET_HW_MODE_RESPONSE_EVENT,
+ WMI_TAG_SOC_HW_MODE_TRANSITION_EVENT,
+ WMI_TAG_VDEV_TXRX_STREAMS,
+ WMI_TAG_SOC_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY,
+ WMI_TAG_SOC_SET_DUAL_MAC_CONFIG_CMD,
+ WMI_TAG_SOC_SET_DUAL_MAC_CONFIG_RESPONSE_EVENT,
+ WMI_TAG_WOW_IOAC_SOCK_PATTERN_T,
+ WMI_TAG_WOW_ENABLE_ICMPV6_NA_FLT_CMD,
+ WMI_TAG_DIAG_EVENT_LOG_CONFIG,
+ WMI_TAG_DIAG_EVENT_LOG_SUPPORTED_EVENT_FIXED_PARAMS,
+ WMI_TAG_PACKET_FILTER_CONFIG,
+ WMI_TAG_PACKET_FILTER_ENABLE,
+ WMI_TAG_SAP_SET_BLACKLIST_PARAM_CMD,
+ WMI_TAG_MGMT_TX_SEND_CMD,
+ WMI_TAG_MGMT_TX_COMPL_EVENT,
+ WMI_TAG_SOC_SET_ANTENNA_MODE_CMD,
+ WMI_TAG_WOW_UDP_SVC_OFLD_CMD,
+ WMI_TAG_LRO_INFO_CMD,
+ WMI_TAG_ROAM_EARLYSTOP_RSSI_THRES_PARAM,
+ WMI_TAG_SERVICE_READY_EXT_EVENT,
+ WMI_TAG_MAWC_SENSOR_REPORT_IND_CMD,
+ WMI_TAG_MAWC_ENABLE_SENSOR_EVENT,
+ WMI_TAG_ROAM_CONFIGURE_MAWC_CMD,
+ WMI_TAG_NLO_CONFIGURE_MAWC_CMD,
+ WMI_TAG_EXTSCAN_CONFIGURE_MAWC_CMD,
+ WMI_TAG_PEER_ASSOC_CONF_EVENT,
+ WMI_TAG_WOW_HOSTWAKEUP_GPIO_PIN_PATTERN_CONFIG_CMD,
+ WMI_TAG_AP_PS_EGAP_PARAM_CMD,
+ WMI_TAG_AP_PS_EGAP_INFO_EVENT,
+ WMI_TAG_PMF_OFFLOAD_SET_SA_QUERY_CMD,
+ WMI_TAG_TRANSFER_DATA_TO_FLASH_CMD,
+ WMI_TAG_TRANSFER_DATA_TO_FLASH_COMPLETE_EVENT,
+ WMI_TAG_SCPC_EVENT,
+ WMI_TAG_AP_PS_EGAP_INFO_CHAINMASK_LIST,
+ WMI_TAG_STA_SMPS_FORCE_MODE_COMPLETE_EVENT,
+ WMI_TAG_BPF_GET_CAPABILITY_CMD,
+ WMI_TAG_BPF_CAPABILITY_INFO_EVT,
+ WMI_TAG_BPF_GET_VDEV_STATS_CMD,
+ WMI_TAG_BPF_VDEV_STATS_INFO_EVT,
+ WMI_TAG_BPF_SET_VDEV_INSTRUCTIONS_CMD,
+ WMI_TAG_BPF_DEL_VDEV_INSTRUCTIONS_CMD,
+ WMI_TAG_VDEV_DELETE_RESP_EVENT,
+ WMI_TAG_PEER_DELETE_RESP_EVENT,
+ WMI_TAG_ROAM_DENSE_THRES_PARAM,
+ WMI_TAG_ENLO_CANDIDATE_SCORE_PARAM,
+ WMI_TAG_PEER_UPDATE_WDS_ENTRY_CMD,
+ WMI_TAG_VDEV_CONFIG_RATEMASK,
+ WMI_TAG_PDEV_FIPS_CMD,
+ WMI_TAG_PDEV_SMART_ANT_ENABLE_CMD,
+ WMI_TAG_PDEV_SMART_ANT_SET_RX_ANTENNA_CMD,
+ WMI_TAG_PEER_SMART_ANT_SET_TX_ANTENNA_CMD,
+ WMI_TAG_PEER_SMART_ANT_SET_TRAIN_ANTENNA_CMD,
+ WMI_TAG_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMD,
+ WMI_TAG_PDEV_SET_ANT_SWITCH_TBL_CMD,
+ WMI_TAG_PDEV_SET_CTL_TABLE_CMD,
+ WMI_TAG_PDEV_SET_MIMOGAIN_TABLE_CMD,
+ WMI_TAG_FWTEST_SET_PARAM_CMD,
+ WMI_TAG_PEER_ATF_REQUEST,
+ WMI_TAG_VDEV_ATF_REQUEST,
+ WMI_TAG_PDEV_GET_ANI_CCK_CONFIG_CMD,
+ WMI_TAG_PDEV_GET_ANI_OFDM_CONFIG_CMD,
+ WMI_TAG_INST_RSSI_STATS_RESP,
+ WMI_TAG_MED_UTIL_REPORT_EVENT,
+ WMI_TAG_PEER_STA_PS_STATECHANGE_EVENT,
+ WMI_TAG_WDS_ADDR_EVENT,
+ WMI_TAG_PEER_RATECODE_LIST_EVENT,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENT,
+ WMI_TAG_PDEV_TPC_EVENT,
+ WMI_TAG_ANI_OFDM_EVENT,
+ WMI_TAG_ANI_CCK_EVENT,
+ WMI_TAG_PDEV_CHANNEL_HOPPING_EVENT,
+ WMI_TAG_PDEV_FIPS_EVENT,
+ WMI_TAG_ATF_PEER_INFO,
+ WMI_TAG_PDEV_GET_TPC_CMD,
+ WMI_TAG_VDEV_FILTER_NRP_CONFIG_CMD,
+ WMI_TAG_QBOOST_CFG_CMD,
+ WMI_TAG_PDEV_SMART_ANT_GPIO_HANDLE,
+ WMI_TAG_PEER_SMART_ANT_SET_TX_ANTENNA_SERIES,
+ WMI_TAG_PEER_SMART_ANT_SET_TRAIN_ANTENNA_PARAM,
+ WMI_TAG_PDEV_SET_ANT_CTRL_CHAIN,
+ WMI_TAG_PEER_CCK_OFDM_RATE_INFO,
+ WMI_TAG_PEER_MCS_RATE_INFO,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBR,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBM,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_FREQNUM,
+ WMI_TAG_MU_REPORT_TOTAL_MU,
+ WMI_TAG_VDEV_SET_DSCP_TID_MAP_CMD,
+ WMI_TAG_ROAM_SET_MBO,
+ WMI_TAG_MIB_STATS_ENABLE_CMD,
+ WMI_TAG_NAN_DISC_IFACE_CREATED_EVENT,
+ WMI_TAG_NAN_DISC_IFACE_DELETED_EVENT,
+ WMI_TAG_NAN_STARTED_CLUSTER_EVENT,
+ WMI_TAG_NAN_JOINED_CLUSTER_EVENT,
+ WMI_TAG_NDI_GET_CAP_REQ,
+ WMI_TAG_NDP_INITIATOR_REQ,
+ WMI_TAG_NDP_RESPONDER_REQ,
+ WMI_TAG_NDP_END_REQ,
+ WMI_TAG_NDI_CAP_RSP_EVENT,
+ WMI_TAG_NDP_INITIATOR_RSP_EVENT,
+ WMI_TAG_NDP_RESPONDER_RSP_EVENT,
+ WMI_TAG_NDP_END_RSP_EVENT,
+ WMI_TAG_NDP_INDICATION_EVENT,
+ WMI_TAG_NDP_CONFIRM_EVENT,
+ WMI_TAG_NDP_END_INDICATION_EVENT,
+ WMI_TAG_VDEV_SET_QUIET_CMD,
+ WMI_TAG_PDEV_SET_PCL_CMD,
+ WMI_TAG_PDEV_SET_HW_MODE_CMD,
+ WMI_TAG_PDEV_SET_MAC_CONFIG_CMD,
+ WMI_TAG_PDEV_SET_ANTENNA_MODE_CMD,
+ WMI_TAG_PDEV_SET_HW_MODE_RESPONSE_EVENT,
+ WMI_TAG_PDEV_HW_MODE_TRANSITION_EVENT,
+ WMI_TAG_PDEV_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY,
+ WMI_TAG_PDEV_SET_MAC_CONFIG_RESPONSE_EVENT,
+ WMI_TAG_COEX_CONFIG_CMD,
+ WMI_TAG_CONFIG_ENHANCED_MCAST_FILTER,
+ WMI_TAG_CHAN_AVOID_RPT_ALLOW_CMD,
+ WMI_TAG_SET_PERIODIC_CHANNEL_STATS_CONFIG,
+ WMI_TAG_VDEV_SET_CUSTOM_AGGR_SIZE_CMD,
+ WMI_TAG_PDEV_WAL_POWER_DEBUG_CMD,
+ WMI_TAG_MAC_PHY_CAPABILITIES,
+ WMI_TAG_HW_MODE_CAPABILITIES,
+ WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS,
+ WMI_TAG_HAL_REG_CAPABILITIES_EXT,
+ WMI_TAG_SOC_HAL_REG_CAPABILITIES,
+ WMI_TAG_VDEV_WISA_CMD,
+ WMI_TAG_TX_POWER_LEVEL_STATS_EVT,
+ WMI_TAG_SCAN_ADAPTIVE_DWELL_PARAMETERS_TLV,
+ WMI_TAG_SCAN_ADAPTIVE_DWELL_CONFIG,
+ WMI_TAG_WOW_SET_ACTION_WAKE_UP_CMD,
+ WMI_TAG_NDP_END_RSP_PER_NDI,
+ WMI_TAG_PEER_BWF_REQUEST,
+ WMI_TAG_BWF_PEER_INFO,
+ WMI_TAG_DBGLOG_TIME_STAMP_SYNC_CMD,
+ WMI_TAG_RMC_SET_LEADER_CMD,
+ WMI_TAG_RMC_MANUAL_LEADER_EVENT,
+ WMI_TAG_PER_CHAIN_RSSI_STATS,
+ WMI_TAG_RSSI_STATS,
+ WMI_TAG_P2P_LO_START_CMD,
+ WMI_TAG_P2P_LO_STOP_CMD,
+ WMI_TAG_P2P_LO_STOPPED_EVENT,
+ WMI_TAG_REORDER_QUEUE_SETUP_CMD,
+ WMI_TAG_REORDER_QUEUE_REMOVE_CMD,
+ WMI_TAG_SET_MULTIPLE_MCAST_FILTER_CMD,
+ WMI_TAG_MGMT_TX_COMPL_BUNDLE_EVENT,
+ WMI_TAG_READ_DATA_FROM_FLASH_CMD,
+ WMI_TAG_READ_DATA_FROM_FLASH_EVENT,
+ WMI_TAG_PDEV_SET_REORDER_TIMEOUT_VAL_CMD,
+ WMI_TAG_PEER_SET_RX_BLOCKSIZE_CMD,
+ WMI_TAG_PDEV_SET_WAKEUP_CONFIG_CMDID,
+ WMI_TAG_TLV_BUF_LEN_PARAM,
+ WMI_TAG_SERVICE_AVAILABLE_EVENT,
+ WMI_TAG_PEER_ANTDIV_INFO_REQ_CMD,
+ WMI_TAG_PEER_ANTDIV_INFO_EVENT,
+ WMI_TAG_PEER_ANTDIV_INFO,
+ WMI_TAG_PDEV_GET_ANTDIV_STATUS_CMD,
+ WMI_TAG_PDEV_ANTDIV_STATUS_EVENT,
+ WMI_TAG_MNT_FILTER_CMD,
+ WMI_TAG_GET_CHIP_POWER_STATS_CMD,
+ WMI_TAG_PDEV_CHIP_POWER_STATS_EVENT,
+ WMI_TAG_COEX_GET_ANTENNA_ISOLATION_CMD,
+ WMI_TAG_COEX_REPORT_ISOLATION_EVENT,
+ WMI_TAG_CHAN_CCA_STATS,
+ WMI_TAG_PEER_SIGNAL_STATS,
+ WMI_TAG_TX_STATS,
+ WMI_TAG_PEER_AC_TX_STATS,
+ WMI_TAG_RX_STATS,
+ WMI_TAG_PEER_AC_RX_STATS,
+ WMI_TAG_REPORT_STATS_EVENT,
+ WMI_TAG_CHAN_CCA_STATS_THRESH,
+ WMI_TAG_PEER_SIGNAL_STATS_THRESH,
+ WMI_TAG_TX_STATS_THRESH,
+ WMI_TAG_RX_STATS_THRESH,
+ WMI_TAG_PDEV_SET_STATS_THRESHOLD_CMD,
+ WMI_TAG_REQUEST_WLAN_STATS_CMD,
+ WMI_TAG_RX_AGGR_FAILURE_EVENT,
+ WMI_TAG_RX_AGGR_FAILURE_INFO,
+ WMI_TAG_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMD,
+ WMI_TAG_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENT,
+ WMI_TAG_PDEV_BAND_TO_MAC,
+ WMI_TAG_TBTT_OFFSET_INFO,
+ WMI_TAG_TBTT_OFFSET_EXT_EVENT,
+ WMI_TAG_SAR_LIMITS_CMD,
+ WMI_TAG_SAR_LIMIT_CMD_ROW,
+ WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
+ WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMD,
+ WMI_TAG_VDEV_ADFS_CH_CFG_CMD,
+ WMI_TAG_VDEV_ADFS_OCAC_ABORT_CMD,
+ WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT,
+ WMI_TAG_VDEV_ADFS_OCAC_COMPLETE_EVENT,
+ WMI_TAG_VDEV_DFS_CAC_COMPLETE_EVENT,
+ WMI_TAG_VENDOR_OUI,
+ WMI_TAG_REQUEST_RCPI_CMD,
+ WMI_TAG_UPDATE_RCPI_EVENT,
+ WMI_TAG_REQUEST_PEER_STATS_INFO_CMD,
+ WMI_TAG_PEER_STATS_INFO,
+ WMI_TAG_PEER_STATS_INFO_EVENT,
+ WMI_TAG_PKGID_EVENT,
+ WMI_TAG_CONNECTED_NLO_RSSI_PARAMS,
+ WMI_TAG_SET_CURRENT_COUNTRY_CMD,
+ WMI_TAG_REGULATORY_RULE_STRUCT,
+ WMI_TAG_REG_CHAN_LIST_CC_EVENT,
+ WMI_TAG_11D_SCAN_START_CMD,
+ WMI_TAG_11D_SCAN_STOP_CMD,
+ WMI_TAG_11D_NEW_COUNTRY_EVENT,
+ WMI_TAG_REQUEST_RADIO_CHAN_STATS_CMD,
+ WMI_TAG_RADIO_CHAN_STATS,
+ WMI_TAG_RADIO_CHAN_STATS_EVENT,
+ WMI_TAG_ROAM_PER_CONFIG,
+ WMI_TAG_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMD,
+ WMI_TAG_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_STATUS_EVENT,
+ WMI_TAG_BPF_SET_VDEV_ACTIVE_MODE_CMD,
+ WMI_TAG_HW_DATA_FILTER_CMD,
+ WMI_TAG_CONNECTED_NLO_BSS_BAND_RSSI_PREF,
+ WMI_TAG_PEER_OPER_MODE_CHANGE_EVENT,
+ WMI_TAG_CHIP_POWER_SAVE_FAILURE_DETECTED,
+ WMI_TAG_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMD,
+ WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT,
+ WMI_TAG_PDEV_UPDATE_PKT_ROUTING_CMD,
+ WMI_TAG_PDEV_CHECK_CAL_VERSION_CMD,
+ WMI_TAG_PDEV_CHECK_CAL_VERSION_EVENT,
+ WMI_TAG_PDEV_SET_DIVERSITY_GAIN_CMD,
+ WMI_TAG_MAC_PHY_CHAINMASK_COMBO,
+ WMI_TAG_MAC_PHY_CHAINMASK_CAPABILITY,
+ WMI_TAG_VDEV_SET_ARP_STATS_CMD,
+ WMI_TAG_VDEV_GET_ARP_STATS_CMD,
+ WMI_TAG_VDEV_GET_ARP_STATS_EVENT,
+ WMI_TAG_IFACE_OFFLOAD_STATS,
+ WMI_TAG_REQUEST_STATS_CMD_SUB_STRUCT_PARAM,
+ WMI_TAG_RSSI_CTL_EXT,
+ WMI_TAG_SINGLE_PHYERR_EXT_RX_HDR,
+ WMI_TAG_COEX_BT_ACTIVITY_EVENT,
+ WMI_TAG_VDEV_GET_TX_POWER_CMD,
+ WMI_TAG_VDEV_TX_POWER_EVENT,
+ WMI_TAG_OFFCHAN_DATA_TX_COMPL_EVENT,
+ WMI_TAG_OFFCHAN_DATA_TX_SEND_CMD,
+ WMI_TAG_TX_SEND_PARAMS,
+ WMI_TAG_HE_RATE_SET,
+ WMI_TAG_CONGESTION_STATS,
+ WMI_TAG_SET_INIT_COUNTRY_CMD,
+ WMI_TAG_SCAN_DBS_DUTY_CYCLE,
+ WMI_TAG_SCAN_DBS_DUTY_CYCLE_PARAM_TLV,
+ WMI_TAG_PDEV_DIV_GET_RSSI_ANTID,
+ WMI_TAG_THERM_THROT_CONFIG_REQUEST,
+ WMI_TAG_THERM_THROT_LEVEL_CONFIG_INFO,
+ WMI_TAG_THERM_THROT_STATS_EVENT,
+ WMI_TAG_THERM_THROT_LEVEL_STATS_INFO,
+ WMI_TAG_PDEV_DIV_RSSI_ANTID_EVENT,
+ WMI_TAG_OEM_DMA_RING_CAPABILITIES,
+ WMI_TAG_OEM_DMA_RING_CFG_REQ,
+ WMI_TAG_OEM_DMA_RING_CFG_RSP,
+ WMI_TAG_OEM_INDIRECT_DATA,
+ WMI_TAG_OEM_DMA_BUF_RELEASE,
+ WMI_TAG_OEM_DMA_BUF_RELEASE_ENTRY,
+ WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
+ WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT,
+ WMI_TAG_ROAM_LCA_DISALLOW_CONFIG,
+ WMI_TAG_VDEV_LIMIT_OFFCHAN_CMD,
+ WMI_TAG_ROAM_RSSI_REJECTION_OCE_CONFIG,
+ WMI_TAG_UNIT_TEST_EVENT,
+ WMI_TAG_ROAM_FILS_OFFLOAD,
+ WMI_TAG_PDEV_UPDATE_PMK_CACHE_CMD,
+ WMI_TAG_PMK_CACHE,
+ WMI_TAG_PDEV_UPDATE_FILS_HLP_PKT_CMD,
+ WMI_TAG_ROAM_FILS_SYNCH,
+ WMI_TAG_GTK_OFFLOAD_EXTENDED,
+ WMI_TAG_ROAM_BG_SCAN_ROAMING,
+ WMI_TAG_OIC_PING_OFFLOAD_PARAMS_CMD,
+ WMI_TAG_OIC_PING_OFFLOAD_SET_ENABLE_CMD,
+ WMI_TAG_OIC_PING_HANDOFF_EVENT,
+ WMI_TAG_DHCP_LEASE_RENEW_OFFLOAD_CMD,
+ WMI_TAG_DHCP_LEASE_RENEW_EVENT,
+ WMI_TAG_BTM_CONFIG,
+ WMI_TAG_DEBUG_MESG_FW_DATA_STALL,
+ WMI_TAG_WLM_CONFIG_CMD,
+ WMI_TAG_PDEV_UPDATE_CTLTABLE_REQUEST,
+ WMI_TAG_PDEV_UPDATE_CTLTABLE_EVENT,
+ WMI_TAG_ROAM_CND_SCORING_PARAM,
+ WMI_TAG_PDEV_CONFIG_VENDOR_OUI_ACTION,
+ WMI_TAG_VENDOR_OUI_EXT,
+ WMI_TAG_ROAM_SYNCH_FRAME_EVENT,
+ WMI_TAG_FD_SEND_FROM_HOST_CMD,
+ WMI_TAG_ENABLE_FILS_CMD,
+ WMI_TAG_HOST_SWFDA_EVENT,
+ WMI_TAG_BCN_OFFLOAD_CTRL_CMD,
+ WMI_TAG_PDEV_SET_AC_TX_QUEUE_OPTIMIZED_CMD,
+ WMI_TAG_STATS_PERIOD,
+ WMI_TAG_NDL_SCHEDULE_UPDATE,
+ WMI_TAG_PEER_TID_MSDUQ_QDEPTH_THRESH_UPDATE_CMD,
+ WMI_TAG_MSDUQ_QDEPTH_THRESH_UPDATE,
+ WMI_TAG_PDEV_SET_RX_FILTER_PROMISCUOUS_CMD,
+ WMI_TAG_SAR2_RESULT_EVENT,
+ WMI_TAG_SAR_CAPABILITIES,
+ WMI_TAG_SAP_OBSS_DETECTION_CFG_CMD,
+ WMI_TAG_SAP_OBSS_DETECTION_INFO_EVT,
+ WMI_TAG_DMA_RING_CAPABILITIES,
+ WMI_TAG_DMA_RING_CFG_REQ,
+ WMI_TAG_DMA_RING_CFG_RSP,
+ WMI_TAG_DMA_BUF_RELEASE,
+ WMI_TAG_DMA_BUF_RELEASE_ENTRY,
+ WMI_TAG_SAR_GET_LIMITS_CMD,
+ WMI_TAG_SAR_GET_LIMITS_EVENT,
+ WMI_TAG_SAR_GET_LIMITS_EVENT_ROW,
+ WMI_TAG_OFFLOAD_11K_REPORT,
+ WMI_TAG_INVOKE_NEIGHBOR_REPORT,
+ WMI_TAG_NEIGHBOR_REPORT_OFFLOAD,
+ WMI_TAG_VDEV_SET_CONNECTIVITY_CHECK_STATS,
+ WMI_TAG_VDEV_GET_CONNECTIVITY_CHECK_STATS,
+ WMI_TAG_BPF_SET_VDEV_ENABLE_CMD,
+ WMI_TAG_BPF_SET_VDEV_WORK_MEMORY_CMD,
+ WMI_TAG_BPF_GET_VDEV_WORK_MEMORY_CMD,
+ WMI_TAG_BPF_GET_VDEV_WORK_MEMORY_RESP_EVT,
+ WMI_TAG_PDEV_GET_NFCAL_POWER,
+ WMI_TAG_BSS_COLOR_CHANGE_ENABLE,
+ WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG,
+ WMI_TAG_OBSS_COLOR_COLLISION_EVT,
+ WMI_TAG_RUNTIME_DPD_RECAL_CMD,
+ WMI_TAG_TWT_ENABLE_CMD,
+ WMI_TAG_TWT_DISABLE_CMD,
+ WMI_TAG_TWT_ADD_DIALOG_CMD,
+ WMI_TAG_TWT_DEL_DIALOG_CMD,
+ WMI_TAG_TWT_PAUSE_DIALOG_CMD,
+ WMI_TAG_TWT_RESUME_DIALOG_CMD,
+ WMI_TAG_TWT_ENABLE_COMPLETE_EVENT,
+ WMI_TAG_TWT_DISABLE_COMPLETE_EVENT,
+ WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_TWT_DEL_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_TWT_PAUSE_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_TWT_RESUME_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_REQUEST_ROAM_SCAN_STATS_CMD,
+ WMI_TAG_ROAM_SCAN_STATS_EVENT,
+ WMI_TAG_PEER_TID_CONFIGURATIONS_CMD,
+ WMI_TAG_VDEV_SET_CUSTOM_SW_RETRY_TH_CMD,
+ WMI_TAG_GET_TPC_POWER_CMD,
+ WMI_TAG_GET_TPC_POWER_EVENT,
+ WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA,
+ WMI_TAG_MOTION_DET_CONFIG_PARAMS_CMD,
+ WMI_TAG_MOTION_DET_BASE_LINE_CONFIG_PARAMS_CMD,
+ WMI_TAG_MOTION_DET_START_STOP_CMD,
+ WMI_TAG_MOTION_DET_BASE_LINE_START_STOP_CMD,
+ WMI_TAG_MOTION_DET_EVENT,
+ WMI_TAG_MOTION_DET_BASE_LINE_EVENT,
+ WMI_TAG_NDP_TRANSPORT_IP,
+ WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD,
+ WMI_TAG_ESP_ESTIMATE_EVENT,
+ WMI_TAG_NAN_HOST_CONFIG,
+ WMI_TAG_SPECTRAL_BIN_SCALING_PARAMS,
+ WMI_TAG_PEER_CFR_CAPTURE_CMD,
+ WMI_TAG_PEER_CHAN_WIDTH_SWITCH_CMD,
+ WMI_TAG_CHAN_WIDTH_PEER_LIST,
+ WMI_TAG_OBSS_SPATIAL_REUSE_SET_DEF_OBSS_THRESH_CMD,
+ WMI_TAG_PDEV_HE_TB_ACTION_FRM_CMD,
+ WMI_TAG_PEER_EXTD2_STATS,
+ WMI_TAG_HPCS_PULSE_START_CMD,
+ WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT,
+ WMI_TAG_VDEV_CHAINMASK_CONFIG_CMD,
+ WMI_TAG_VDEV_BCN_OFFLOAD_QUIET_CONFIG_CMD,
+ WMI_TAG_NAN_EVENT_INFO,
+ WMI_TAG_NDP_CHANNEL_INFO,
+ WMI_TAG_NDP_CMD,
+ WMI_TAG_NDP_EVENT,
+ /* TODO add all the missing cmds */
+ WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD = 0x301,
+ WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO,
+ WMI_TAG_FILS_DISCOVERY_TMPL_CMD = 0x344,
+ WMI_TAG_MAC_PHY_CAPABILITIES_EXT = 0x36F,
+ WMI_TAG_REGULATORY_RULE_EXT_STRUCT = 0x3A9,
+ WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT,
+ WMI_TAG_MAX
+};
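[editor's note] Each wmi_tlv_tag value identifies the structure carried by one TLV in a WMI message. On the wire, the ath11k-style TLV header packs the tag into the upper 16 bits of a little-endian 32-bit word and the payload length into the lower 16; a sketch under that assumption (field layout taken from ath11k, not from this hunk):

    #include <stdint.h>

    /* Assumed ath11k-style TLV header layout: tag[31:16], len[15:0]. */
    static uint32_t tlv_hdr(uint16_t tag, uint16_t len)
    {
            return ((uint32_t)tag << 16) | len;
    }

    static uint16_t tlv_tag(uint32_t hdr) { return hdr >> 16; }
    static uint16_t tlv_len(uint32_t hdr) { return hdr & 0xffff; }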
+
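[editor's note] The wmi_tlv_service enum that follows lists bit positions, not values to send: firmware advertises support in a bitmap (IDs below WMI_MAX_SERVICE via the service-ready bitmap, IDs of 128 and above via the extended service bitmap), and the host tests a bit before relying on a feature. A sketch of the usual test, with the array layout assumed rather than quoted from this patch:

    #include <stdbool.h>
    #include <stdint.h>

    /* Assumed layout: 32 service bits per word, LSB first. */
    static bool wmi_service_enabled(const uint32_t *bitmap, unsigned int id)
    {
            return (bitmap[id / 32] >> (id % 32)) & 1;
    }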
WMI_TLV_SERVICE_OCB = 71, + WMI_TLV_SERVICE_AP_ARPNS_OFFLOAD = 72, + WMI_TLV_SERVICE_PER_BAND_CHAINMASK_SUPPORT = 73, + WMI_TLV_SERVICE_PACKET_FILTER_OFFLOAD = 74, + WMI_TLV_SERVICE_MGMT_TX_HTT = 75, + WMI_TLV_SERVICE_MGMT_TX_WMI = 76, + WMI_TLV_SERVICE_EXT_MSG = 77, + WMI_TLV_SERVICE_MAWC = 78, + WMI_TLV_SERVICE_PEER_ASSOC_CONF = 79, + WMI_TLV_SERVICE_EGAP = 80, + WMI_TLV_SERVICE_STA_PMF_OFFLOAD = 81, + WMI_TLV_SERVICE_UNIFIED_WOW_CAPABILITY = 82, + WMI_TLV_SERVICE_ENHANCED_PROXY_STA = 83, + WMI_TLV_SERVICE_ATF = 84, + WMI_TLV_SERVICE_COEX_GPIO = 85, + WMI_TLV_SERVICE_AUX_SPECTRAL_INTF = 86, + WMI_TLV_SERVICE_AUX_CHAN_LOAD_INTF = 87, + WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64 = 88, + WMI_TLV_SERVICE_ENTERPRISE_MESH = 89, + WMI_TLV_SERVICE_RESTRT_CHNL_SUPPORT = 90, + WMI_TLV_SERVICE_BPF_OFFLOAD = 91, + WMI_TLV_SERVICE_SYNC_DELETE_CMDS = 92, + WMI_TLV_SERVICE_SMART_ANTENNA_SW_SUPPORT = 93, + WMI_TLV_SERVICE_SMART_ANTENNA_HW_SUPPORT = 94, + WMI_TLV_SERVICE_RATECTRL_LIMIT_MAX_MIN_RATES = 95, + WMI_TLV_SERVICE_NAN_DATA = 96, + WMI_TLV_SERVICE_NAN_RTT = 97, + WMI_TLV_SERVICE_11AX = 98, + WMI_TLV_SERVICE_DEPRECATED_REPLACE = 99, + WMI_TLV_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE = 100, + WMI_TLV_SERVICE_ENHANCED_MCAST_FILTER = 101, + WMI_TLV_SERVICE_PERIODIC_CHAN_STAT_SUPPORT = 102, + WMI_TLV_SERVICE_MESH_11S = 103, + WMI_TLV_SERVICE_HALF_RATE_QUARTER_RATE_SUPPORT = 104, + WMI_TLV_SERVICE_VDEV_RX_FILTER = 105, + WMI_TLV_SERVICE_P2P_LISTEN_OFFLOAD_SUPPORT = 106, + WMI_TLV_SERVICE_MARK_FIRST_WAKEUP_PACKET = 107, + WMI_TLV_SERVICE_MULTIPLE_MCAST_FILTER_SET = 108, + WMI_TLV_SERVICE_HOST_MANAGED_RX_REORDER = 109, + WMI_TLV_SERVICE_FLASH_RDWR_SUPPORT = 110, + WMI_TLV_SERVICE_WLAN_STATS_REPORT = 111, + WMI_TLV_SERVICE_TX_MSDU_ID_NEW_PARTITION_SUPPORT = 112, + WMI_TLV_SERVICE_DFS_PHYERR_OFFLOAD = 113, + WMI_TLV_SERVICE_RCPI_SUPPORT = 114, + WMI_TLV_SERVICE_FW_MEM_DUMP_SUPPORT = 115, + WMI_TLV_SERVICE_PEER_STATS_INFO = 116, + WMI_TLV_SERVICE_REGULATORY_DB = 117, + WMI_TLV_SERVICE_11D_OFFLOAD = 118, + WMI_TLV_SERVICE_HW_DATA_FILTERING = 119, + WMI_TLV_SERVICE_MULTIPLE_VDEV_RESTART = 120, + WMI_TLV_SERVICE_PKT_ROUTING = 121, + WMI_TLV_SERVICE_CHECK_CAL_VERSION = 122, + WMI_TLV_SERVICE_OFFCHAN_TX_WMI = 123, + WMI_TLV_SERVICE_8SS_TX_BFEE = 124, + WMI_TLV_SERVICE_EXTENDED_NSS_SUPPORT = 125, + WMI_TLV_SERVICE_ACK_TIMEOUT = 126, + WMI_TLV_SERVICE_PDEV_BSS_CHANNEL_INFO_64 = 127, + + WMI_MAX_SERVICE = 128, + + WMI_TLV_SERVICE_CHAN_LOAD_INFO = 128, + WMI_TLV_SERVICE_TX_PPDU_INFO_STATS_SUPPORT = 129, + WMI_TLV_SERVICE_VDEV_LIMIT_OFFCHAN_SUPPORT = 130, + WMI_TLV_SERVICE_FILS_SUPPORT = 131, + WMI_TLV_SERVICE_WLAN_OIC_PING_OFFLOAD = 132, + WMI_TLV_SERVICE_WLAN_DHCP_RENEW = 133, + WMI_TLV_SERVICE_MAWC_SUPPORT = 134, + WMI_TLV_SERVICE_VDEV_LATENCY_CONFIG = 135, + WMI_TLV_SERVICE_PDEV_UPDATE_CTLTABLE_SUPPORT = 136, + WMI_TLV_SERVICE_PKTLOG_SUPPORT_OVER_HTT = 137, + WMI_TLV_SERVICE_VDEV_MULTI_GROUP_KEY_SUPPORT = 138, + WMI_TLV_SERVICE_SCAN_PHYMODE_SUPPORT = 139, + WMI_TLV_SERVICE_THERM_THROT = 140, + WMI_TLV_SERVICE_BCN_OFFLOAD_START_STOP_SUPPORT = 141, + WMI_TLV_SERVICE_WOW_WAKEUP_BY_TIMER_PATTERN = 142, + WMI_TLV_SERVICE_PEER_MAP_UNMAP_V2_SUPPORT = 143, + WMI_TLV_SERVICE_OFFCHAN_DATA_TID_SUPPORT = 144, + WMI_TLV_SERVICE_RX_PROMISC_ENABLE_SUPPORT = 145, + WMI_TLV_SERVICE_SUPPORT_DIRECT_DMA = 146, + WMI_TLV_SERVICE_AP_OBSS_DETECTION_OFFLOAD = 147, + WMI_TLV_SERVICE_11K_NEIGHBOUR_REPORT_SUPPORT = 148, + WMI_TLV_SERVICE_LISTEN_INTERVAL_OFFLOAD_SUPPORT = 149, + WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD = 150, + 
WMI_TLV_SERVICE_RUNTIME_DPD_RECAL = 151, + WMI_TLV_SERVICE_STA_TWT = 152, + WMI_TLV_SERVICE_AP_TWT = 153, + WMI_TLV_SERVICE_GMAC_OFFLOAD_SUPPORT = 154, + WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT = 155, + WMI_TLV_SERVICE_PEER_TID_CONFIGS_SUPPORT = 156, + WMI_TLV_SERVICE_VDEV_SWRETRY_PER_AC_CONFIG_SUPPORT = 157, + WMI_TLV_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_SCC_SUPPORT = 158, + WMI_TLV_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_MCC_SUPPORT = 159, + WMI_TLV_SERVICE_MOTION_DET = 160, + WMI_TLV_SERVICE_INFRA_MBSSID = 161, + WMI_TLV_SERVICE_OBSS_SPATIAL_REUSE = 162, + WMI_TLV_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT = 163, + WMI_TLV_SERVICE_NAN_DBS_SUPPORT = 164, + WMI_TLV_SERVICE_NDI_DBS_SUPPORT = 165, + WMI_TLV_SERVICE_NAN_SAP_SUPPORT = 166, + WMI_TLV_SERVICE_NDI_SAP_SUPPORT = 167, + WMI_TLV_SERVICE_CFR_CAPTURE_SUPPORT = 168, + WMI_TLV_SERVICE_CFR_CAPTURE_IND_MSG_TYPE_1 = 169, + WMI_TLV_SERVICE_ESP_SUPPORT = 170, + WMI_TLV_SERVICE_PEER_CHWIDTH_CHANGE = 171, + WMI_TLV_SERVICE_WLAN_HPCS_PULSE = 172, + WMI_TLV_SERVICE_PER_VDEV_CHAINMASK_CONFIG_SUPPORT = 173, + WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI = 174, + WMI_TLV_SERVICE_NAN_DISABLE_SUPPORT = 175, + WMI_TLV_SERVICE_HTT_H2T_NO_HTC_HDR_LEN_IN_MSG_LEN = 176, + WMI_TLV_SERVICE_COEX_SUPPORT_UNEQUAL_ISOLATION = 177, + WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT = 178, + WMI_TLV_SERVICE_SUPPORT_EXTEND_ADDRESS = 179, + WMI_TLV_SERVICE_BEACON_RECEPTION_STATS = 180, + WMI_TLV_SERVICE_FETCH_TX_PN = 181, + WMI_TLV_SERVICE_PEER_UNMAP_RESPONSE_SUPPORT = 182, + WMI_TLV_SERVICE_TX_PER_PEER_AMPDU_SIZE = 183, + WMI_TLV_SERVICE_BSS_COLOR_SWITCH_COUNT = 184, + WMI_TLV_SERVICE_HTT_PEER_STATS_SUPPORT = 185, + WMI_TLV_SERVICE_UL_RU26_ALLOWED = 186, + WMI_TLV_SERVICE_GET_MWS_COEX_STATE = 187, + WMI_TLV_SERVICE_GET_MWS_DPWB_STATE = 188, + WMI_TLV_SERVICE_GET_MWS_TDM_STATE = 189, + WMI_TLV_SERVICE_GET_MWS_IDRX_STATE = 190, + WMI_TLV_SERVICE_GET_MWS_ANTENNA_SHARING_STATE = 191, + WMI_TLV_SERVICE_ENHANCED_TPC_CONFIG_EVENT = 192, + WMI_TLV_SERVICE_WLM_STATS_REQUEST = 193, + WMI_TLV_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT = 194, + WMI_TLV_SERVICE_WPA3_FT_SAE_SUPPORT = 195, + WMI_TLV_SERVICE_WPA3_FT_SUITE_B_SUPPORT = 196, + WMI_TLV_SERVICE_VOW_ENABLE = 197, + WMI_TLV_SERVICE_CFR_CAPTURE_IND_EVT_TYPE_1 = 198, + WMI_TLV_SERVICE_BROADCAST_TWT = 199, + WMI_TLV_SERVICE_RAP_DETECTION_SUPPORT = 200, + WMI_TLV_SERVICE_PS_TDCC = 201, + WMI_TLV_SERVICE_THREE_WAY_COEX_CONFIG_LEGACY = 202, + WMI_TLV_SERVICE_THREE_WAY_COEX_CONFIG_OVERRIDE = 203, + WMI_TLV_SERVICE_TX_PWR_PER_PEER = 204, + WMI_TLV_SERVICE_STA_PLUS_STA_SUPPORT = 205, + WMI_TLV_SERVICE_WPA3_FT_FILS = 206, + WMI_TLV_SERVICE_ADAPTIVE_11R_ROAM = 207, + WMI_TLV_SERVICE_CHAN_RF_CHARACTERIZATION_INFO = 208, + WMI_TLV_SERVICE_FW_IFACE_COMBINATION_SUPPORT = 209, + WMI_TLV_SERVICE_TX_COMPL_TSF64 = 210, + WMI_TLV_SERVICE_DSM_ROAM_FILTER = 211, + WMI_TLV_SERVICE_PACKET_CAPTURE_SUPPORT = 212, + WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET = 213, + WMI_TLV_SERVICE_FREQINFO_IN_METADATA = 219, + WMI_TLV_SERVICE_EXT2_MSG = 220, + + WMI_MAX_EXT_SERVICE +}; + +enum { + WMI_SMPS_FORCED_MODE_NONE = 0, + WMI_SMPS_FORCED_MODE_DISABLED, + WMI_SMPS_FORCED_MODE_STATIC, + WMI_SMPS_FORCED_MODE_DYNAMIC +}; + +enum wmi_tpc_chainmask { + WMI_TPC_CHAINMASK_CONFIG_BAND_2G = 0, + WMI_TPC_CHAINMASK_CONFIG_BAND_5G = 1, + WMI_NUM_SUPPORTED_BAND_MAX = 2, +}; + +enum wmi_peer_param { + WMI_PEER_MIMO_PS_STATE = 1, + WMI_PEER_AMPDU = 2, + WMI_PEER_AUTHORIZE = 3, + WMI_PEER_CHWIDTH = 4, + WMI_PEER_NSS = 5, + WMI_PEER_USE_4ADDR = 6, + WMI_PEER_MEMBERSHIP = 7, + WMI_PEER_USERPOS = 8, + 
WMI_PEER_CRIT_PROTO_HINT_ENABLED = 9, + WMI_PEER_TX_FAIL_CNT_THR = 10, + WMI_PEER_SET_HW_RETRY_CTS2S = 11, + WMI_PEER_IBSS_ATIM_WINDOW_LENGTH = 12, + WMI_PEER_PHYMODE = 13, + WMI_PEER_USE_FIXED_PWR = 14, + WMI_PEER_PARAM_FIXED_RATE = 15, + WMI_PEER_SET_MU_WHITELIST = 16, + WMI_PEER_SET_MAX_TX_RATE = 17, + WMI_PEER_SET_MIN_TX_RATE = 18, + WMI_PEER_SET_DEFAULT_ROUTING = 19, +}; + +enum wmi_slot_time { + WMI_VDEV_SLOT_TIME_LONG = 1, + WMI_VDEV_SLOT_TIME_SHORT = 2, +}; + +enum wmi_preamble { + WMI_VDEV_PREAMBLE_LONG = 1, + WMI_VDEV_PREAMBLE_SHORT = 2, +}; + +enum wmi_peer_smps_state { + WMI_PEER_SMPS_PS_NONE = 0, + WMI_PEER_SMPS_STATIC = 1, + WMI_PEER_SMPS_DYNAMIC = 2 +}; + +enum wmi_peer_chwidth { + WMI_PEER_CHWIDTH_20MHZ = 0, + WMI_PEER_CHWIDTH_40MHZ = 1, + WMI_PEER_CHWIDTH_80MHZ = 2, + WMI_PEER_CHWIDTH_160MHZ = 3, +}; + +enum wmi_beacon_gen_mode { + WMI_BEACON_STAGGERED_MODE = 0, + WMI_BEACON_BURST_MODE = 1 +}; + +enum wmi_direct_buffer_module { + WMI_DIRECT_BUF_SPECTRAL = 0, + WMI_DIRECT_BUF_CFR = 1, + + /* keep it last */ + WMI_DIRECT_BUF_MAX +}; + +struct ath12k_wmi_pdev_band_arg { + u32 pdev_id; + u32 start_freq; + u32 end_freq; +}; + +struct ath12k_wmi_ppe_threshold_arg { + u32 numss_m1; + u32 ru_bit_mask; + u32 ppet16_ppet8_ru3_ru0[WMI_MAX_NUM_SS]; +}; + +#define PSOC_HOST_MAX_PHY_SIZE (3) +#define ATH12K_11B_SUPPORT BIT(0) +#define ATH12K_11G_SUPPORT BIT(1) +#define ATH12K_11A_SUPPORT BIT(2) +#define ATH12K_11N_SUPPORT BIT(3) +#define ATH12K_11AC_SUPPORT BIT(4) +#define ATH12K_11AX_SUPPORT BIT(5) + +struct ath12k_wmi_hal_reg_capabilities_ext_arg { + u32 phy_id; + u32 eeprom_reg_domain; + u32 eeprom_reg_domain_ext; + u32 regcap1; + u32 regcap2; + u32 wireless_modes; + u32 low_2ghz_chan; + u32 high_2ghz_chan; + u32 low_5ghz_chan; + u32 high_5ghz_chan; +}; + +#define WMI_HOST_MAX_PDEV 3 + +struct ath12k_wmi_host_mem_chunk_params { + __le32 tlv_header; + __le32 req_id; + __le32 ptr; + __le32 size; +} __packed; + +struct ath12k_wmi_host_mem_chunk_arg { + void *vaddr; + dma_addr_t paddr; + u32 len; + u32 req_id; +}; + +struct ath12k_wmi_resource_config_arg { + u32 num_vdevs; + u32 num_peers; + u32 num_active_peers; + u32 num_offload_peers; + u32 num_offload_reorder_buffs; + u32 num_peer_keys; + u32 num_tids; + u32 ast_skid_limit; + u32 tx_chain_mask; + u32 rx_chain_mask; + u32 rx_timeout_pri[4]; + u32 rx_decap_mode; + u32 scan_max_pending_req; + u32 bmiss_offload_max_vdev; + u32 roam_offload_max_vdev; + u32 roam_offload_max_ap_profiles; + u32 num_mcast_groups; + u32 num_mcast_table_elems; + u32 mcast2ucast_mode; + u32 tx_dbg_log_size; + u32 num_wds_entries; + u32 dma_burst_size; + u32 mac_aggr_delim; + u32 rx_skip_defrag_timeout_dup_detection_check; + u32 vow_config; + u32 gtk_offload_max_vdev; + u32 num_msdu_desc; + u32 max_frag_entries; + u32 max_peer_ext_stats; + u32 smart_ant_cap; + u32 bk_minfree; + u32 be_minfree; + u32 vi_minfree; + u32 vo_minfree; + u32 rx_batchmode; + u32 tt_support; + u32 atf_config; + u32 iphdr_pad_config; + u32 qwrap_config:16, + alloc_frag_desc_for_data_pkt:16; + u32 num_tdls_vdevs; + u32 num_tdls_conn_table_entries; + u32 beacon_tx_offload_max_vdev; + u32 num_multicast_filter_entries; + u32 num_wow_filters; + u32 num_keep_alive_pattern; + u32 keep_alive_pattern_size; + u32 max_tdls_concurrent_sleep_sta; + u32 max_tdls_concurrent_buffer_sta; + u32 wmi_send_separate; + u32 num_ocb_vdevs; + u32 num_ocb_channels; + u32 num_ocb_schedules; + u32 num_ns_ext_tuples_cfg; + u32 bpf_instruction_size; + u32 max_bssid_rx_filters; + u32 use_pdev_id; + u32 
peer_map_unmap_version; + u32 sched_params; + u32 twt_ap_pdev_count; + u32 twt_ap_sta_count; +}; + +struct ath12k_wmi_init_cmd_arg { + struct ath12k_wmi_resource_config_arg res_cfg; + u8 num_mem_chunks; + struct ath12k_wmi_host_mem_chunk_arg *mem_chunks; + u32 hw_mode_id; + u32 num_band_to_mac; + struct ath12k_wmi_pdev_band_arg band_to_mac[WMI_HOST_MAX_PDEV]; +}; + +struct ath12k_wmi_pdev_band_to_mac_params { + __le32 tlv_header; + __le32 pdev_id; + __le32 start_freq; + __le32 end_freq; +} __packed; + +/* This is both individual command WMI_PDEV_SET_HW_MODE_CMDID and also part + * of WMI_TAG_INIT_CMD. + */ +struct ath12k_wmi_pdev_set_hw_mode_cmd { + __le32 tlv_header; + __le32 pdev_id; + __le32 hw_mode_index; + __le32 num_band_to_mac; +} __packed; + +struct ath12k_wmi_ppe_threshold_params { + __le32 numss_m1; /** NSS - 1*/ + __le32 ru_info; + __le32 ppet16_ppet8_ru3_ru0[WMI_MAX_NUM_SS]; +} __packed; + +#define HW_BD_INFO_SIZE 5 + +struct ath12k_wmi_abi_version_params { + __le32 abi_version_0; + __le32 abi_version_1; + __le32 abi_version_ns_0; + __le32 abi_version_ns_1; + __le32 abi_version_ns_2; + __le32 abi_version_ns_3; +} __packed; + +struct wmi_init_cmd { + __le32 tlv_header; + struct ath12k_wmi_abi_version_params host_abi_vers; + __le32 num_host_mem_chunks; +} __packed; + +#define WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT 4 + +struct ath12k_wmi_resource_config_params { + __le32 tlv_header; + __le32 num_vdevs; + __le32 num_peers; + __le32 num_offload_peers; + __le32 num_offload_reorder_buffs; + __le32 num_peer_keys; + __le32 num_tids; + __le32 ast_skid_limit; + __le32 tx_chain_mask; + __le32 rx_chain_mask; + __le32 rx_timeout_pri[4]; + __le32 rx_decap_mode; + __le32 scan_max_pending_req; + __le32 bmiss_offload_max_vdev; + __le32 roam_offload_max_vdev; + __le32 roam_offload_max_ap_profiles; + __le32 num_mcast_groups; + __le32 num_mcast_table_elems; + __le32 mcast2ucast_mode; + __le32 tx_dbg_log_size; + __le32 num_wds_entries; + __le32 dma_burst_size; + __le32 mac_aggr_delim; + __le32 rx_skip_defrag_timeout_dup_detection_check; + __le32 vow_config; + __le32 gtk_offload_max_vdev; + __le32 num_msdu_desc; + __le32 max_frag_entries; + __le32 num_tdls_vdevs; + __le32 num_tdls_conn_table_entries; + __le32 beacon_tx_offload_max_vdev; + __le32 num_multicast_filter_entries; + __le32 num_wow_filters; + __le32 num_keep_alive_pattern; + __le32 keep_alive_pattern_size; + __le32 max_tdls_concurrent_sleep_sta; + __le32 max_tdls_concurrent_buffer_sta; + __le32 wmi_send_separate; + __le32 num_ocb_vdevs; + __le32 num_ocb_channels; + __le32 num_ocb_schedules; + __le32 flag1; + __le32 smart_ant_cap; + __le32 bk_minfree; + __le32 be_minfree; + __le32 vi_minfree; + __le32 vo_minfree; + __le32 alloc_frag_desc_for_data_pkt; + __le32 num_ns_ext_tuples_cfg; + __le32 bpf_instruction_size; + __le32 max_bssid_rx_filters; + __le32 use_pdev_id; + __le32 max_num_dbs_scan_duty_cycle; + __le32 max_num_group_keys; + __le32 peer_map_unmap_version; + __le32 sched_params; + __le32 twt_ap_pdev_count; + __le32 twt_ap_sta_count; + __le32 max_nlo_ssids; + __le32 num_pkt_filters; + __le32 num_max_sta_vdevs; + __le32 max_bssid_indicator; + __le32 ul_resp_config; + __le32 msdu_flow_override_config0; + __le32 msdu_flow_override_config1; + __le32 flags2; + __le32 host_service_flags; + __le32 max_rnr_neighbours; + __le32 ema_max_vap_cnt; + __le32 ema_max_profile_period; +} __packed; + +struct wmi_service_ready_event { + __le32 fw_build_vers; + struct ath12k_wmi_abi_version_params fw_abi_vers; + __le32 phy_capability; + __le32 
max_frag_entry; + __le32 num_rf_chains; + __le32 ht_cap_info; + __le32 vht_cap_info; + __le32 vht_supp_mcs; + __le32 hw_min_tx_power; + __le32 hw_max_tx_power; + __le32 sys_cap_info; + __le32 min_pkt_size_enable; + __le32 max_bcn_ie_size; + __le32 num_mem_reqs; + __le32 max_num_scan_channels; + __le32 hw_bd_id; + __le32 hw_bd_info[HW_BD_INFO_SIZE]; + __le32 max_supported_macs; + __le32 wmi_fw_sub_feat_caps; + __le32 num_dbs_hw_modes; + /* txrx_chainmask + * [7:0] - 2G band tx chain mask + * [15:8] - 2G band rx chain mask + * [23:16] - 5G band tx chain mask + * [31:24] - 5G band rx chain mask + */ + __le32 txrx_chainmask; + __le32 default_dbs_hw_mode_index; + __le32 num_msdu_desc; +} __packed; + +#define WMI_SERVICE_BM_SIZE ((WMI_MAX_SERVICE + sizeof(u32) - 1) / sizeof(u32)) + +#define WMI_SERVICE_SEGMENT_BM_SIZE32 4 /* 4x u32 = 128 bits */ +#define WMI_SERVICE_EXT_BM_SIZE (WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(u32)) +#define WMI_AVAIL_SERVICE_BITS_IN_SIZE32 32 +#define WMI_SERVICE_BITS_IN_SIZE32 4 + +struct wmi_service_ready_ext_event { + __le32 default_conc_scan_config_bits; + __le32 default_fw_config_bits; + struct ath12k_wmi_ppe_threshold_params ppet; + __le32 he_cap_info; + __le32 mpdu_density; + __le32 max_bssid_rx_filters; + __le32 fw_build_vers_ext; + __le32 max_nlo_ssids; + __le32 max_bssid_indicator; + __le32 he_cap_info_ext; +} __packed; + +struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params { + __le32 num_hw_modes; + __le32 num_chainmask_tables; +} __packed; + +struct ath12k_wmi_hw_mode_cap_params { + __le32 tlv_header; + __le32 hw_mode_id; + __le32 phy_id_map; + __le32 hw_mode_config_type; +} __packed; + +#define WMI_MAX_HECAP_PHY_SIZE (3) + +struct ath12k_wmi_mac_phy_caps_params { + __le32 hw_mode_id; + __le32 pdev_id; + __le32 phy_id; + __le32 supported_flags; + __le32 supported_bands; + __le32 ampdu_density; + __le32 max_bw_supported_2g; + __le32 ht_cap_info_2g; + __le32 vht_cap_info_2g; + __le32 vht_supp_mcs_2g; + __le32 he_cap_info_2g; + __le32 he_supp_mcs_2g; + __le32 tx_chain_mask_2g; + __le32 rx_chain_mask_2g; + __le32 max_bw_supported_5g; + __le32 ht_cap_info_5g; + __le32 vht_cap_info_5g; + __le32 vht_supp_mcs_5g; + __le32 he_cap_info_5g; + __le32 he_supp_mcs_5g; + __le32 tx_chain_mask_5g; + __le32 rx_chain_mask_5g; + __le32 he_cap_phy_info_2g[WMI_MAX_HECAP_PHY_SIZE]; + __le32 he_cap_phy_info_5g[WMI_MAX_HECAP_PHY_SIZE]; + struct ath12k_wmi_ppe_threshold_params he_ppet2g; + struct ath12k_wmi_ppe_threshold_params he_ppet5g; + __le32 chainmask_table_id; + __le32 lmac_id; + __le32 he_cap_info_2g_ext; + __le32 he_cap_info_5g_ext; + __le32 he_cap_info_internal; +} __packed; + +struct ath12k_wmi_hal_reg_caps_ext_params { + __le32 tlv_header; + __le32 phy_id; + __le32 eeprom_reg_domain; + __le32 eeprom_reg_domain_ext; + __le32 regcap1; + __le32 regcap2; + __le32 wireless_modes; + __le32 low_2ghz_chan; + __le32 high_2ghz_chan; + __le32 low_5ghz_chan; + __le32 high_5ghz_chan; +} __packed; + +struct ath12k_wmi_soc_hal_reg_caps_params { + __le32 num_phy; +} __packed; + +/* 2 word representation of MAC addr */ +struct ath12k_wmi_mac_addr_params { + u8 addr[ETH_ALEN]; + u8 padding[2]; +} __packed; + +struct ath12k_wmi_dma_ring_caps_params { + __le32 tlv_header; + __le32 pdev_id; + __le32 module_id; + __le32 min_elem; + __le32 min_buf_sz; + __le32 min_buf_align; +} __packed; + +struct ath12k_wmi_ready_event_min_params { + struct ath12k_wmi_abi_version_params fw_abi_vers; + struct ath12k_wmi_mac_addr_params mac_addr; + __le32 status; + __le32 num_dscp_table; + __le32 
num_extra_mac_addr; + __le32 num_total_peers; + __le32 num_extra_peers; +} __packed; + +struct wmi_ready_event { + struct ath12k_wmi_ready_event_min_params ready_event_min; + __le32 max_ast_index; + __le32 pktlog_defs_checksum; +} __packed; + +struct wmi_service_available_event { + __le32 wmi_service_segment_offset; + __le32 wmi_service_segment_bitmap[WMI_SERVICE_SEGMENT_BM_SIZE32]; +} __packed; + +struct ath12k_wmi_vdev_create_arg { + u8 if_id; + u32 type; + u32 subtype; + struct { + u8 tx; + u8 rx; + } chains[NUM_NL80211_BANDS]; + u32 pdev_id; + u8 if_stats_id; +}; + +#define ATH12K_MAX_VDEV_STATS_ID 0x30 +#define ATH12K_INVAL_VDEV_STATS_ID 0xFF + +struct wmi_vdev_create_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 vdev_type; + __le32 vdev_subtype; + struct ath12k_wmi_mac_addr_params vdev_macaddr; + __le32 num_cfg_txrx_streams; + __le32 pdev_id; + __le32 vdev_stats_id; +} __packed; + +struct ath12k_wmi_vdev_txrx_streams_params { + __le32 tlv_header; + u32 band; + u32 supported_tx_streams; + u32 supported_rx_streams; +} __packed; + +struct wmi_vdev_delete_cmd { + __le32 tlv_header; + __le32 vdev_id; +} __packed; + +struct wmi_vdev_up_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 vdev_assoc_id; + struct ath12k_wmi_mac_addr_params vdev_bssid; + struct ath12k_wmi_mac_addr_params trans_bssid; + __le32 profile_idx; + __le32 profile_num; +} __packed; + +struct wmi_vdev_stop_cmd { + __le32 tlv_header; + __le32 vdev_id; +} __packed; + +struct wmi_vdev_down_cmd { + __le32 tlv_header; + __le32 vdev_id; +} __packed; + +#define WMI_VDEV_START_HIDDEN_SSID BIT(0) +#define WMI_VDEV_START_PMF_ENABLED BIT(1) +#define WMI_VDEV_START_LDPC_RX_ENABLED BIT(3) + +#define ATH12K_WMI_SSID_LEN 32 + +struct ath12k_wmi_ssid_params { + __le32 ssid_len; + u8 ssid[ATH12K_WMI_SSID_LEN]; +} __packed; + +#define ATH12K_VDEV_SETUP_TIMEOUT_HZ (1 * HZ) + +struct wmi_vdev_start_request_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 requestor_id; + __le32 beacon_interval; + __le32 dtim_period; + __le32 flags; + struct ath12k_wmi_ssid_params ssid; + __le32 bcn_tx_rate; + __le32 bcn_txpower; + __le32 num_noa_descriptors; + __le32 disable_hw_ack; + __le32 preferred_tx_streams; + __le32 preferred_rx_streams; + __le32 he_ops; + __le32 cac_duration_ms; + __le32 regdomain; +} __packed; + +#define MGMT_TX_DL_FRM_LEN 64 + +struct ath12k_wmi_channel_arg { + u8 chan_id; + u8 pwr; + u32 mhz; + u32 half_rate:1, + quarter_rate:1, + dfs_set:1, + dfs_set_cfreq2:1, + is_chan_passive:1, + allow_ht:1, + allow_vht:1, + allow_he:1, + set_agile:1, + psc_channel:1; + u32 phy_mode; + u32 cfreq1; + u32 cfreq2; + char maxpower; + char minpower; + char maxregpower; + u8 antennamax; + u8 reg_class_id; +}; + +enum wmi_phy_mode { + MODE_11A = 0, + MODE_11G = 1, /* 11b/g Mode */ + MODE_11B = 2, /* 11b Mode */ + MODE_11GONLY = 3, /* 11g only Mode */ + MODE_11NA_HT20 = 4, + MODE_11NG_HT20 = 5, + MODE_11NA_HT40 = 6, + MODE_11NG_HT40 = 7, + MODE_11AC_VHT20 = 8, + MODE_11AC_VHT40 = 9, + MODE_11AC_VHT80 = 10, + MODE_11AC_VHT20_2G = 11, + MODE_11AC_VHT40_2G = 12, + MODE_11AC_VHT80_2G = 13, + MODE_11AC_VHT80_80 = 14, + MODE_11AC_VHT160 = 15, + MODE_11AX_HE20 = 16, + MODE_11AX_HE40 = 17, + MODE_11AX_HE80 = 18, + MODE_11AX_HE80_80 = 19, + MODE_11AX_HE160 = 20, + MODE_11AX_HE20_2G = 21, + MODE_11AX_HE40_2G = 22, + MODE_11AX_HE80_2G = 23, + MODE_UNKNOWN = 24, + MODE_MAX = 24 +}; + +struct wmi_vdev_start_req_arg { + u32 vdev_id; + u32 freq; + u32 band_center_freq1; + u32 band_center_freq2; + bool passive; + bool allow_ibss; + bool allow_ht; + 
bool allow_vht; + bool ht40plus; + bool chan_radar; + bool freq2_radar; + bool allow_he; + u32 min_power; + u32 max_power; + u32 max_reg_power; + u32 max_antenna_gain; + enum wmi_phy_mode mode; + u32 bcn_intval; + u32 dtim_period; + u8 *ssid; + u32 ssid_len; + u32 bcn_tx_rate; + u32 bcn_tx_power; + bool disable_hw_ack; + bool hidden_ssid; + bool pmf_enabled; + u32 he_ops; + u32 cac_duration_ms; + u32 regdomain; + u32 pref_rx_streams; + u32 pref_tx_streams; + u32 num_noa_descriptors; +}; + +struct ath12k_wmi_peer_create_arg { + const u8 *peer_addr; + u32 peer_type; + u32 vdev_id; +}; + +struct ath12k_wmi_pdev_set_regdomain_arg { + u16 current_rd_in_use; + u16 current_rd_2g; + u16 current_rd_5g; + u32 ctl_2g; + u32 ctl_5g; + u8 dfs_domain; + u32 pdev_id; +}; + +struct ath12k_wmi_rx_reorder_queue_remove_arg { + u8 *peer_macaddr; + u16 vdev_id; + u32 peer_tid_bitmap; +}; + +#define WMI_HOST_PDEV_ID_SOC 0xFF +#define WMI_HOST_PDEV_ID_0 0 +#define WMI_HOST_PDEV_ID_1 1 +#define WMI_HOST_PDEV_ID_2 2 + +#define WMI_PDEV_ID_SOC 0 +#define WMI_PDEV_ID_1ST 1 +#define WMI_PDEV_ID_2ND 2 +#define WMI_PDEV_ID_3RD 3 + +/* Freq units in MHz */ +#define REG_RULE_START_FREQ 0x0000ffff +#define REG_RULE_END_FREQ 0xffff0000 +#define REG_RULE_FLAGS 0x0000ffff +#define REG_RULE_MAX_BW 0x0000ffff +#define REG_RULE_REG_PWR 0x00ff0000 +#define REG_RULE_ANT_GAIN 0xff000000 +#define REG_RULE_PSD_INFO BIT(2) +#define REG_RULE_PSD_EIRP 0xffff0000 + +#define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0) +#define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1) +#define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2) +#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3) + +#define HECAP_PHYDWORD_0 0 +#define HECAP_PHYDWORD_1 1 +#define HECAP_PHYDWORD_2 2 + +#define HECAP_PHY_SU_BFER BIT(31) +#define HECAP_PHY_SU_BFEE BIT(0) +#define HECAP_PHY_MU_BFER BIT(1) +#define HECAP_PHY_UL_MUMIMO BIT(22) +#define HECAP_PHY_UL_MUOFDMA BIT(23) + +#define HECAP_PHY_SUBFMR_GET(hecap_phy) \ + u32_get_bits(hecap_phy[HECAP_PHYDWORD_0], HECAP_PHY_SU_BFER) + +#define HECAP_PHY_SUBFME_GET(hecap_phy) \ + u32_get_bits(hecap_phy[HECAP_PHYDWORD_1], HECAP_PHY_SU_BFEE) + +#define HECAP_PHY_MUBFMR_GET(hecap_phy) \ + u32_get_bits(hecap_phy[HECAP_PHYDWORD_1], HECAP_PHY_MU_BFER) + +#define HECAP_PHY_ULMUMIMO_GET(hecap_phy) \ + u32_get_bits(hecap_phy[HECAP_PHYDWORD_0], HECAP_PHY_UL_MUMIMO) + +#define HECAP_PHY_ULOFDMA_GET(hecap_phy) \ + u32_get_bits(hecap_phy[HECAP_PHYDWORD_0], HECAP_PHY_UL_MUOFDMA) + +#define HE_MODE_SU_TX_BFEE BIT(0) +#define HE_MODE_SU_TX_BFER BIT(1) +#define HE_MODE_MU_TX_BFEE BIT(2) +#define HE_MODE_MU_TX_BFER BIT(3) +#define HE_MODE_DL_OFDMA BIT(4) +#define HE_MODE_UL_OFDMA BIT(5) +#define HE_MODE_UL_MUMIMO BIT(6) + +#define HE_DL_MUOFDMA_ENABLE 1 +#define HE_UL_MUOFDMA_ENABLE 1 +#define HE_DL_MUMIMO_ENABLE 1 +#define HE_MU_BFEE_ENABLE 1 +#define HE_SU_BFEE_ENABLE 1 + +#define HE_VHT_SOUNDING_MODE_ENABLE 1 +#define HE_SU_MU_SOUNDING_MODE_ENABLE 1 +#define HE_TRIG_NONTRIG_SOUNDING_MODE_ENABLE 1 + +/* HE or VHT Sounding */ +#define HE_VHT_SOUNDING_MODE BIT(0) +/* SU or MU Sounding */ +#define HE_SU_MU_SOUNDING_MODE BIT(2) +/* Trig or Non-Trig Sounding */ +#define HE_TRIG_NONTRIG_SOUNDING_MODE BIT(3) + +#define WMI_TXBF_STS_CAP_OFFSET_LSB 4 +#define WMI_TXBF_STS_CAP_OFFSET_MASK 0x70 +#define WMI_BF_SOUND_DIM_OFFSET_LSB 8 +#define WMI_BF_SOUND_DIM_OFFSET_MASK 0x700 + +enum wmi_peer_type { + WMI_PEER_TYPE_DEFAULT = 0, + WMI_PEER_TYPE_BSS = 1, + WMI_PEER_TYPE_TDLS = 2, +}; + +struct wmi_peer_create_cmd { + __le32 tlv_header; + __le32 vdev_id; + struct 
ath12k_wmi_mac_addr_params peer_macaddr; + __le32 peer_type; +} __packed; + +struct wmi_peer_delete_cmd { + __le32 tlv_header; + __le32 vdev_id; + struct ath12k_wmi_mac_addr_params peer_macaddr; +} __packed; + +struct wmi_peer_reorder_queue_setup_cmd { + __le32 tlv_header; + __le32 vdev_id; + struct ath12k_wmi_mac_addr_params peer_macaddr; + __le32 tid; + __le32 queue_ptr_lo; + __le32 queue_ptr_hi; + __le32 queue_no; + __le32 ba_window_size_valid; + __le32 ba_window_size; +} __packed; + +struct wmi_peer_reorder_queue_remove_cmd { + __le32 tlv_header; + __le32 vdev_id; + struct ath12k_wmi_mac_addr_params peer_macaddr; + __le32 tid_mask; +} __packed; + +enum wmi_bss_chan_info_req_type { + WMI_BSS_SURVEY_REQ_TYPE_READ = 1, + WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR, +}; + +struct wmi_pdev_set_param_cmd { + __le32 tlv_header; + __le32 pdev_id; + __le32 param_id; + __le32 param_value; +} __packed; + +struct wmi_pdev_set_ps_mode_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 sta_ps_mode; +} __packed; + +struct wmi_pdev_suspend_cmd { + __le32 tlv_header; + __le32 pdev_id; + __le32 suspend_opt; +} __packed; + +struct wmi_pdev_resume_cmd { + __le32 tlv_header; + __le32 pdev_id; +} __packed; + +struct wmi_pdev_bss_chan_info_req_cmd { + __le32 tlv_header; + /* ref wmi_bss_chan_info_req_type */ + __le32 req_type; +} __packed; + +struct wmi_ap_ps_peer_cmd { + __le32 tlv_header; + __le32 vdev_id; + struct ath12k_wmi_mac_addr_params peer_macaddr; + __le32 param; + __le32 value; +} __packed; + +struct wmi_sta_powersave_param_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 param; + __le32 value; +} __packed; + +struct wmi_pdev_set_regdomain_cmd { + __le32 tlv_header; + __le32 pdev_id; + __le32 reg_domain; + __le32 reg_domain_2g; + __le32 reg_domain_5g; + __le32 conformance_test_limit_2g; + __le32 conformance_test_limit_5g; + __le32 dfs_domain; +} __packed; + +struct wmi_peer_set_param_cmd { + __le32 tlv_header; + __le32 vdev_id; + struct ath12k_wmi_mac_addr_params peer_macaddr; + __le32 param_id; + __le32 param_value; +} __packed; + +struct wmi_peer_flush_tids_cmd { + __le32 tlv_header; + __le32 vdev_id; + struct ath12k_wmi_mac_addr_params peer_macaddr; + __le32 peer_tid_bitmap; +} __packed; + +struct wmi_dfs_phyerr_offload_cmd { + __le32 tlv_header; + __le32 pdev_id; +} __packed; + +struct wmi_bcn_offload_ctrl_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 bcn_ctrl_op; +} __packed; + +enum scan_dwelltime_adaptive_mode { + SCAN_DWELL_MODE_DEFAULT = 0, + SCAN_DWELL_MODE_CONSERVATIVE = 1, + SCAN_DWELL_MODE_MODERATE = 2, + SCAN_DWELL_MODE_AGGRESSIVE = 3, + SCAN_DWELL_MODE_STATIC = 4 +}; + +#define WLAN_SCAN_MAX_NUM_SSID 10 +#define WLAN_SCAN_MAX_NUM_BSSID 10 +#define WLAN_SCAN_MAX_NUM_CHANNELS 40 + +struct ath12k_wmi_element_info_arg { + u32 len; + u8 *ptr; +}; + +#define WMI_IE_BITMAP_SIZE 8 + +#define WMI_SCAN_MAX_NUM_SSID 0x0A +/* prefix used by scan requestor ids on the host */ +#define WMI_HOST_SCAN_REQUESTOR_ID_PREFIX 0xA000 + +/* prefix used by scan request ids generated on the host */ +/* host cycles through the lower 12 bits to generate ids */ +#define WMI_HOST_SCAN_REQ_ID_PREFIX 0xA000 + +#define WLAN_SCAN_PARAMS_MAX_SSID 16 +#define WLAN_SCAN_PARAMS_MAX_BSSID 4 +#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256 + +/* Some firmware revisions may refuse values lower than this and instead + * report scan completion with a WMI_SCAN_REASON_TIMEDOUT reason. 
+ */ +#define WMI_SCAN_CHAN_MIN_TIME_MSEC 40 + +/* Scan priority numbers must be sequential, starting with 0 */ +enum wmi_scan_priority { + WMI_SCAN_PRIORITY_VERY_LOW = 0, + WMI_SCAN_PRIORITY_LOW, + WMI_SCAN_PRIORITY_MEDIUM, + WMI_SCAN_PRIORITY_HIGH, + WMI_SCAN_PRIORITY_VERY_HIGH, + WMI_SCAN_PRIORITY_COUNT /* number of priorities supported */ +}; + +enum wmi_scan_event_type { + WMI_SCAN_EVENT_STARTED = BIT(0), + WMI_SCAN_EVENT_COMPLETED = BIT(1), + WMI_SCAN_EVENT_BSS_CHANNEL = BIT(2), + WMI_SCAN_EVENT_FOREIGN_CHAN = BIT(3), + WMI_SCAN_EVENT_DEQUEUED = BIT(4), + /* possibly by high-prio scan */ + WMI_SCAN_EVENT_PREEMPTED = BIT(5), + WMI_SCAN_EVENT_START_FAILED = BIT(6), + WMI_SCAN_EVENT_RESTARTED = BIT(7), + WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT = BIT(8), + WMI_SCAN_EVENT_SUSPENDED = BIT(9), + WMI_SCAN_EVENT_RESUMED = BIT(10), + WMI_SCAN_EVENT_MAX = BIT(15), +}; + +enum wmi_scan_completion_reason { + WMI_SCAN_REASON_COMPLETED, + WMI_SCAN_REASON_CANCELLED, + WMI_SCAN_REASON_PREEMPTED, + WMI_SCAN_REASON_TIMEDOUT, + WMI_SCAN_REASON_INTERNAL_FAILURE, + WMI_SCAN_REASON_MAX, +}; + +struct wmi_start_scan_cmd { + __le32 tlv_header; + __le32 scan_id; + __le32 scan_req_id; + __le32 vdev_id; + __le32 scan_priority; + __le32 notify_scan_events; + __le32 dwell_time_active; + __le32 dwell_time_passive; + __le32 min_rest_time; + __le32 max_rest_time; + __le32 repeat_probe_time; + __le32 probe_spacing_time; + __le32 idle_time; + __le32 max_scan_time; + __le32 probe_delay; + __le32 scan_ctrl_flags; + __le32 burst_duration; + __le32 num_chan; + __le32 num_bssid; + __le32 num_ssids; + __le32 ie_len; + __le32 n_probes; + struct ath12k_wmi_mac_addr_params mac_addr; + struct ath12k_wmi_mac_addr_params mac_mask; + u32 ie_bitmap[WMI_IE_BITMAP_SIZE]; + __le32 num_vendor_oui; + __le32 scan_ctrl_flags_ext; + __le32 dwell_time_active_2g; + __le32 dwell_time_active_6g; + __le32 dwell_time_passive_6g; + __le32 scan_start_offset; +} __packed; + +#define WMI_SCAN_FLAG_PASSIVE 0x1 +#define WMI_SCAN_ADD_BCAST_PROBE_REQ 0x2 +#define WMI_SCAN_ADD_CCK_RATES 0x4 +#define WMI_SCAN_ADD_OFDM_RATES 0x8 +#define WMI_SCAN_CHAN_STAT_EVENT 0x10 +#define WMI_SCAN_FILTER_PROBE_REQ 0x20 +#define WMI_SCAN_BYPASS_DFS_CHN 0x40 +#define WMI_SCAN_CONTINUE_ON_ERROR 0x80 +#define WMI_SCAN_FILTER_PROMISCUOS 0x100 +#define WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS 0x200 +#define WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ 0x400 +#define WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ 0x800 +#define WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ 0x1000 +#define WMI_SCAN_OFFCHAN_MGMT_TX 0x2000 +#define WMI_SCAN_OFFCHAN_DATA_TX 0x4000 +#define WMI_SCAN_CAPTURE_PHY_ERROR 0x8000 +#define WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN 0x10000 +#define WMI_SCAN_FLAG_HALF_RATE_SUPPORT 0x20000 +#define WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT 0x40000 +#define WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ 0x80000 +#define WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ 0x100000 + +#define WMI_SCAN_DWELL_MODE_MASK GENMASK(23, 21) + +enum { + WMI_SCAN_DWELL_MODE_DEFAULT = 0, + WMI_SCAN_DWELL_MODE_CONSERVATIVE = 1, + WMI_SCAN_DWELL_MODE_MODERATE = 2, + WMI_SCAN_DWELL_MODE_AGGRESSIVE = 3, + WMI_SCAN_DWELL_MODE_STATIC = 4, +}; + +struct ath12k_wmi_hint_short_ssid_arg { + u32 freq_flags; + u32 short_ssid; +}; + +struct ath12k_wmi_hint_bssid_arg { + u32 freq_flags; + struct ath12k_wmi_mac_addr_params bssid; +}; + +struct ath12k_wmi_scan_req_arg { + u32 scan_id; + u32 scan_req_id; + u32 vdev_id; + u32 pdev_id; + enum wmi_scan_priority scan_priority; + union { + struct { + u32 scan_ev_started:1, + scan_ev_completed:1, + scan_ev_bss_chan:1, + 
scan_ev_foreign_chan:1, + scan_ev_dequeued:1, + scan_ev_preempted:1, + scan_ev_start_failed:1, + scan_ev_restarted:1, + scan_ev_foreign_chn_exit:1, + scan_ev_invalid:1, + scan_ev_gpio_timeout:1, + scan_ev_suspended:1, + scan_ev_resumed:1; + }; + u32 scan_events; + }; + u32 dwell_time_active; + u32 dwell_time_active_2g; + u32 dwell_time_passive; + u32 dwell_time_active_6g; + u32 dwell_time_passive_6g; + u32 min_rest_time; + u32 max_rest_time; + u32 repeat_probe_time; + u32 probe_spacing_time; + u32 idle_time; + u32 max_scan_time; + u32 probe_delay; + union { + struct { + u32 scan_f_passive:1, + scan_f_bcast_probe:1, + scan_f_cck_rates:1, + scan_f_ofdm_rates:1, + scan_f_chan_stat_evnt:1, + scan_f_filter_prb_req:1, + scan_f_bypass_dfs_chn:1, + scan_f_continue_on_err:1, + scan_f_offchan_mgmt_tx:1, + scan_f_offchan_data_tx:1, + scan_f_promisc_mode:1, + scan_f_capture_phy_err:1, + scan_f_strict_passive_pch:1, + scan_f_half_rate:1, + scan_f_quarter_rate:1, + scan_f_force_active_dfs_chn:1, + scan_f_add_tpc_ie_in_probe:1, + scan_f_add_ds_ie_in_probe:1, + scan_f_add_spoofed_mac_in_probe:1, + scan_f_add_rand_seq_in_probe:1, + scan_f_en_ie_whitelist_in_probe:1, + scan_f_forced:1, + scan_f_2ghz:1, + scan_f_5ghz:1, + scan_f_80mhz:1; + }; + u32 scan_flags; + }; + enum scan_dwelltime_adaptive_mode adaptive_dwell_time_mode; + u32 burst_duration; + u32 num_chan; + u32 num_bssid; + u32 num_ssids; + u32 n_probes; + u32 chan_list[WLAN_SCAN_MAX_NUM_CHANNELS]; + u32 notify_scan_events; + struct cfg80211_ssid ssid[WLAN_SCAN_MAX_NUM_SSID]; + struct ath12k_wmi_mac_addr_params bssid_list[WLAN_SCAN_MAX_NUM_BSSID]; + struct ath12k_wmi_element_info_arg extraie; + u32 num_hint_s_ssid; + u32 num_hint_bssid; + struct ath12k_wmi_hint_short_ssid_arg hint_s_ssid[WLAN_SCAN_MAX_HINT_S_SSID]; + struct ath12k_wmi_hint_bssid_arg hint_bssid[WLAN_SCAN_MAX_HINT_BSSID]; +}; + +struct wmi_ssid_arg { + int len; + const u8 *ssid; +}; + +struct wmi_bssid_arg { + const u8 *bssid; +}; + +struct wmi_start_scan_arg { + u32 scan_id; + u32 scan_req_id; + u32 vdev_id; + u32 scan_priority; + u32 notify_scan_events; + u32 dwell_time_active; + u32 dwell_time_passive; + u32 min_rest_time; + u32 max_rest_time; + u32 repeat_probe_time; + u32 probe_spacing_time; + u32 idle_time; + u32 max_scan_time; + u32 probe_delay; + u32 scan_ctrl_flags; + + u32 ie_len; + u32 n_channels; + u32 n_ssids; + u32 n_bssids; + + u8 ie[WLAN_SCAN_PARAMS_MAX_IE_LEN]; + u32 channels[64]; + struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID]; + struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID]; +}; + +#define WMI_SCAN_STOP_ONE 0x00000000 +#define WMI_SCAN_STOP_VAP_ALL 0x01000000 +#define WMI_SCAN_STOP_ALL 0x04000000 + +/* Prefix 0xA000 indicates that the scan request + * is triggered by the host + */ +#define ATH12K_SCAN_ID 0xA000 + +enum scan_cancel_req_type { + WLAN_SCAN_CANCEL_SINGLE = 1, + WLAN_SCAN_CANCEL_VDEV_ALL, + WLAN_SCAN_CANCEL_PDEV_ALL, +}; + +struct ath12k_wmi_scan_cancel_arg { + u32 requester; + u32 scan_id; + enum scan_cancel_req_type req_type; + u32 vdev_id; + u32 pdev_id; +}; + +struct wmi_bcn_send_from_host_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 data_len; + union { + __le32 frag_ptr; + __le32 frag_ptr_lo; + }; + __le32 frame_ctrl; + __le32 dtim_flag; + __le32 bcn_antenna; + __le32 frag_ptr_hi; +}; + +#define WMI_CHAN_INFO_MODE GENMASK(5, 0) +#define WMI_CHAN_INFO_HT40_PLUS BIT(6) +#define WMI_CHAN_INFO_PASSIVE BIT(7) +#define WMI_CHAN_INFO_ADHOC_ALLOWED BIT(8) +#define WMI_CHAN_INFO_AP_DISABLED BIT(9) +#define WMI_CHAN_INFO_DFS BIT(10) +#define WMI_CHAN_INFO_ALLOW_HT BIT(11) +#define WMI_CHAN_INFO_ALLOW_VHT BIT(12) +#define WMI_CHAN_INFO_CHAN_CHANGE_CAUSE_CSA BIT(13) +#define WMI_CHAN_INFO_HALF_RATE BIT(14) +#define WMI_CHAN_INFO_QUARTER_RATE BIT(15) +#define WMI_CHAN_INFO_DFS_FREQ2 BIT(16) +#define WMI_CHAN_INFO_ALLOW_HE BIT(17) +#define WMI_CHAN_INFO_PSC BIT(18) + +#define WMI_CHAN_REG_INFO1_MIN_PWR GENMASK(7, 0) +#define WMI_CHAN_REG_INFO1_MAX_PWR GENMASK(15, 8) +#define WMI_CHAN_REG_INFO1_MAX_REG_PWR GENMASK(23, 16) +#define WMI_CHAN_REG_INFO1_REG_CLS GENMASK(31, 24) + +#define WMI_CHAN_REG_INFO2_ANT_MAX GENMASK(7, 0) +#define WMI_CHAN_REG_INFO2_MAX_TX_PWR GENMASK(15, 8) + +struct ath12k_wmi_channel_params { + __le32 tlv_header; + __le32 mhz; + __le32 band_center_freq1; + __le32 band_center_freq2; + __le32 info; + __le32 reg_info_1; + __le32 reg_info_2; +} __packed;
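The info word of ath12k_wmi_channel_params packs the PHY mode into WMI_CHAN_INFO_MODE and the per-channel capability flags into the bits above it, while reg_info_1/reg_info_2 carry the power limits and regulatory class. A minimal sketch of filling one channel TLV from a host-side ath12k_wmi_channel_arg is shown below; the WMI_TAG_CHANNEL tag and the ath12k_wmi_tlv_cmd_hdr() helper are assumed from the rest of this patchset and do not appear in this hunk.

/* Sketch only: encode a host channel arg into the firmware channel TLV.
 * WMI_TAG_CHANNEL and ath12k_wmi_tlv_cmd_hdr() are assumptions here.
 */
static void ath12k_wmi_put_chan(struct ath12k_wmi_channel_params *chan,
				const struct ath12k_wmi_channel_arg *arg)
{
	u32 info;

	chan->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
						  sizeof(*chan));
	chan->mhz = cpu_to_le32(arg->mhz);
	chan->band_center_freq1 = cpu_to_le32(arg->cfreq1);
	chan->band_center_freq2 = cpu_to_le32(arg->cfreq2);

	/* PHY mode occupies the low six bits, the flags sit above it */
	info = u32_encode_bits(arg->phy_mode, WMI_CHAN_INFO_MODE);
	if (arg->is_chan_passive)
		info |= WMI_CHAN_INFO_PASSIVE;
	if (arg->dfs_set)
		info |= WMI_CHAN_INFO_DFS;
	if (arg->allow_ht)
		info |= WMI_CHAN_INFO_ALLOW_HT;
	if (arg->allow_vht)
		info |= WMI_CHAN_INFO_ALLOW_VHT;
	if (arg->allow_he)
		info |= WMI_CHAN_INFO_ALLOW_HE;
	if (arg->psc_channel)
		info |= WMI_CHAN_INFO_PSC;
	chan->info = cpu_to_le32(info);

	chan->reg_info_1 =
		cpu_to_le32(u32_encode_bits(arg->minpower,
					    WMI_CHAN_REG_INFO1_MIN_PWR) |
			    u32_encode_bits(arg->maxpower,
					    WMI_CHAN_REG_INFO1_MAX_PWR) |
			    u32_encode_bits(arg->maxregpower,
					    WMI_CHAN_REG_INFO1_MAX_REG_PWR) |
			    u32_encode_bits(arg->reg_class_id,
					    WMI_CHAN_REG_INFO1_REG_CLS));
	chan->reg_info_2 =
		cpu_to_le32(u32_encode_bits(arg->antennamax,
					    WMI_CHAN_REG_INFO2_ANT_MAX) |
			    u32_encode_bits(arg->maxregpower,
					    WMI_CHAN_REG_INFO2_MAX_TX_PWR));
}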
+ +enum wmi_sta_ps_mode { + WMI_STA_PS_MODE_DISABLED = 0, + WMI_STA_PS_MODE_ENABLED = 1, +}; + +#define WMI_SMPS_MASK_LOWER_16BITS 0xFF +#define WMI_SMPS_MASK_UPPER_3BITS 0x7 +#define WMI_SMPS_PARAM_VALUE_SHIFT 29 + +#define ATH12K_WMI_FW_HANG_ASSERT_TYPE 1 +#define ATH12K_WMI_FW_HANG_DELAY 0 + +/* type: 0 = unused, 1 = assert, 2 = do not respond to the detect command + * delay_time_ms: how long the simulated hang is delayed, in milliseconds + */ + +struct wmi_force_fw_hang_cmd { + __le32 tlv_header; + __le32 type; + __le32 delay_time_ms; +} __packed; + +struct wmi_vdev_set_param_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 param_id; + __le32 param_value; +} __packed; + +struct wmi_get_pdev_temperature_cmd { + __le32 tlv_header; + __le32 param; + __le32 pdev_id; +} __packed; + +#define WMI_BEACON_TX_BUFFER_SIZE 512 + +struct wmi_bcn_tmpl_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 tim_ie_offset; + __le32 buf_len; + __le32 csa_switch_count_offset; + __le32 ext_csa_switch_count_offset; + __le32 csa_event_bitmap; + __le32 mbssid_ie_offset; + __le32 esp_ie_offset; +} __packed; + +struct wmi_vdev_install_key_cmd { + __le32 tlv_header; + __le32 vdev_id; + struct ath12k_wmi_mac_addr_params peer_macaddr; + __le32 key_idx; + __le32 key_flags; + __le32 key_cipher; + __le64 key_rsc_counter; + __le64 key_global_rsc_counter; + __le64 key_tsc_counter; + u8 wpi_key_rsc_counter[16]; + u8 wpi_key_tsc_counter[16]; + __le32 key_len; + __le32 key_txmic_len; + __le32 key_rxmic_len; + __le32 is_group_key_id_valid; + __le32 group_key_id; + + /* Followed by key_data containing the key, followed by + * the tx mic and then the rx mic + */ +} __packed; + +struct wmi_vdev_install_key_arg { + u32 vdev_id; + const u8 *macaddr; + u32 key_idx; + u32 key_flags; + u32 key_cipher; + u32 key_len; + u32 key_txmic_len; + u32 key_rxmic_len; + u64 key_rsc_counter; + const void *key_data; +}; + +#define WMI_MAX_SUPPORTED_RATES 128 +#define WMI_HOST_MAX_HECAP_PHY_SIZE 3 +#define WMI_HOST_MAX_HE_RATE_SET 3 +#define WMI_HECAP_TXRX_MCS_NSS_IDX_80 0 +#define WMI_HECAP_TXRX_MCS_NSS_IDX_160 1 +#define WMI_HECAP_TXRX_MCS_NSS_IDX_80_80 2 + +struct wmi_rate_set_arg { + u32 num_rates; + u8 rates[WMI_MAX_SUPPORTED_RATES]; +}; + +struct ath12k_wmi_peer_assoc_arg { + u32 vdev_id; + u32 peer_new_assoc; + u32 peer_associd; + u32 peer_flags; + u32 peer_caps; + u32 peer_listen_intval; + u32 peer_ht_caps; + u32 peer_max_mpdu; + u32 peer_mpdu_density; + u32 peer_rate_caps; + u32 peer_nss; + u32 peer_vht_caps; + u32 peer_phymode; + u32 peer_ht_info[2]; + struct wmi_rate_set_arg peer_legacy_rates; + struct wmi_rate_set_arg peer_ht_rates; + u32 rx_max_rate; + u32 rx_mcs_set; + u32 tx_max_rate; + u32 tx_mcs_set; + u8 vht_capable; + u8 min_data_rate; + u32 
tx_max_mcs_nss; + u32 peer_bw_rxnss_override; + bool is_pmf_enabled; + bool is_wme_set; + bool qos_flag; + bool apsd_flag; + bool ht_flag; + bool bw_40; + bool bw_80; + bool bw_160; + bool stbc_flag; + bool ldpc_flag; + bool static_mimops_flag; + bool dynamic_mimops_flag; + bool spatial_mux_flag; + bool vht_flag; + bool vht_ng_flag; + bool need_ptk_4_way; + bool need_gtk_2_way; + bool auth_flag; + bool safe_mode_enabled; + bool amsdu_disable; + /* Use common structure */ + u8 peer_mac[ETH_ALEN]; + + bool he_flag; + u32 peer_he_cap_macinfo[2]; + u32 peer_he_cap_macinfo_internal; + u32 peer_he_caps_6ghz; + u32 peer_he_ops; + u32 peer_he_cap_phyinfo[WMI_HOST_MAX_HECAP_PHY_SIZE]; + u32 peer_he_mcs_count; + u32 peer_he_rx_mcs_set[WMI_HOST_MAX_HE_RATE_SET]; + u32 peer_he_tx_mcs_set[WMI_HOST_MAX_HE_RATE_SET]; + bool twt_responder; + bool twt_requester; + struct ath12k_wmi_ppe_threshold_arg peer_ppet; +}; + +struct wmi_peer_assoc_complete_cmd { + __le32 tlv_header; + struct ath12k_wmi_mac_addr_params peer_macaddr; + __le32 vdev_id; + __le32 peer_new_assoc; + __le32 peer_associd; + __le32 peer_flags; + __le32 peer_caps; + __le32 peer_listen_intval; + __le32 peer_ht_caps; + __le32 peer_max_mpdu; + __le32 peer_mpdu_density; + __le32 peer_rate_caps; + __le32 peer_nss; + __le32 peer_vht_caps; + __le32 peer_phymode; + __le32 peer_ht_info[2]; + __le32 num_peer_legacy_rates; + __le32 num_peer_ht_rates; + __le32 peer_bw_rxnss_override; + struct ath12k_wmi_ppe_threshold_params peer_ppet; + __le32 peer_he_cap_info; + __le32 peer_he_ops; + __le32 peer_he_cap_phy[WMI_MAX_HECAP_PHY_SIZE]; + __le32 peer_he_mcs; + __le32 peer_he_cap_info_ext; + __le32 peer_he_cap_info_internal; + __le32 min_data_rate; + __le32 peer_he_caps_6ghz; +} __packed; + +struct wmi_stop_scan_cmd { + __le32 tlv_header; + __le32 requestor; + __le32 scan_id; + __le32 req_type; + __le32 vdev_id; + __le32 pdev_id; +} __packed; + +struct ath12k_wmi_scan_chan_list_arg { + u32 pdev_id; + u16 nallchans; + struct ath12k_wmi_channel_arg channel[]; +}; + +struct wmi_scan_chan_list_cmd { + __le32 tlv_header; + __le32 num_scan_chans; + __le32 flags; + __le32 pdev_id; +} __packed; + +#define WMI_MGMT_SEND_DOWNLD_LEN 64 + +#define WMI_TX_PARAMS_DWORD0_POWER GENMASK(7, 0) +#define WMI_TX_PARAMS_DWORD0_MCS_MASK GENMASK(19, 8) +#define WMI_TX_PARAMS_DWORD0_NSS_MASK GENMASK(27, 20) +#define WMI_TX_PARAMS_DWORD0_RETRY_LIMIT GENMASK(31, 28) + +#define WMI_TX_PARAMS_DWORD1_CHAIN_MASK GENMASK(7, 0) +#define WMI_TX_PARAMS_DWORD1_BW_MASK GENMASK(14, 8) +#define WMI_TX_PARAMS_DWORD1_PREAMBLE_TYPE GENMASK(19, 15) +#define WMI_TX_PARAMS_DWORD1_FRAME_TYPE BIT(20) +#define WMI_TX_PARAMS_DWORD1_RSVD GENMASK(31, 21) + +struct wmi_mgmt_send_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 desc_id; + __le32 chanfreq; + __le32 paddr_lo; + __le32 paddr_hi; + __le32 frame_len; + __le32 buf_len; + __le32 tx_params_valid; + + /* This TLV is followed by struct wmi_mgmt_frame */ + + /* Followed by struct wmi_mgmt_send_params */ +} __packed; + +struct wmi_sta_powersave_mode_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 sta_ps_mode; +} __packed; + +struct wmi_sta_smps_force_mode_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 forced_mode; +} __packed; + +struct wmi_sta_smps_param_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 param; + __le32 value; +} __packed; + +struct ath12k_wmi_bcn_prb_info_params { + __le32 tlv_header; + __le32 caps; + __le32 erp; +} __packed; + +enum { + WMI_PDEV_SUSPEND, + WMI_PDEV_SUSPEND_AND_DISABLE_INTR, +}; + +struct 
wmi_pdev_green_ap_ps_enable_cmd_param { + __le32 tlv_header; + __le32 pdev_id; + __le32 enable; +} __packed; + +struct ath12k_wmi_ap_ps_arg { + u32 vdev_id; + u32 param; + u32 value; +}; + +enum set_init_cc_type { + WMI_COUNTRY_INFO_TYPE_ALPHA, + WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE, + WMI_COUNTRY_INFO_TYPE_REGDOMAIN, +}; + +enum set_init_cc_flags { + INVALID_CC, + CC_IS_SET, + REGDMN_IS_SET, + ALPHA_IS_SET, +}; + +struct ath12k_wmi_init_country_arg { + union { + u16 country_code; + u16 regdom_id; + u8 alpha2[3]; + } cc_info; + enum set_init_cc_flags flags; +}; + +struct wmi_init_country_cmd { + __le32 tlv_header; + __le32 pdev_id; + __le32 init_cc_type; + union { + __le32 country_code; + __le32 regdom_id; + __le32 alpha2; + } cc_info; +} __packed; + +struct wmi_delba_send_cmd { + __le32 tlv_header; + __le32 vdev_id; + struct ath12k_wmi_mac_addr_params peer_macaddr; + __le32 tid; + __le32 initiator; + __le32 reasoncode; +} __packed; + +struct wmi_addba_setresponse_cmd { + __le32 tlv_header; + __le32 vdev_id; + struct ath12k_wmi_mac_addr_params peer_macaddr; + __le32 tid; + __le32 statuscode; +} __packed; + +struct wmi_addba_send_cmd { + __le32 tlv_header; + __le32 vdev_id; + struct ath12k_wmi_mac_addr_params peer_macaddr; + __le32 tid; + __le32 buffersize; +} __packed; + +struct wmi_addba_clear_resp_cmd { + __le32 tlv_header; + __le32 vdev_id; + struct ath12k_wmi_mac_addr_params peer_macaddr; +} __packed; + +#define DFS_PHYERR_UNIT_TEST_CMD 0 +#define DFS_UNIT_TEST_MODULE 0x2b +#define DFS_UNIT_TEST_TOKEN 0xAA + +enum dfs_test_args_idx { + DFS_TEST_CMDID = 0, + DFS_TEST_PDEV_ID, + DFS_TEST_RADAR_PARAM, + DFS_MAX_TEST_ARGS, +}; + +struct wmi_dfs_unit_test_arg { + u32 cmd_id; + u32 pdev_id; + u32 radar_param; +}; + +struct wmi_unit_test_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 module_id; + __le32 num_args; + __le32 diag_token; + /* Followed by test args*/ +} __packed; + +#define MAX_SUPPORTED_RATES 128 + +#define WMI_PEER_AUTH 0x00000001 +#define WMI_PEER_QOS 0x00000002 +#define WMI_PEER_NEED_PTK_4_WAY 0x00000004 +#define WMI_PEER_NEED_GTK_2_WAY 0x00000010 +#define WMI_PEER_HE 0x00000400 +#define WMI_PEER_APSD 0x00000800 +#define WMI_PEER_HT 0x00001000 +#define WMI_PEER_40MHZ 0x00002000 +#define WMI_PEER_STBC 0x00008000 +#define WMI_PEER_LDPC 0x00010000 +#define WMI_PEER_DYN_MIMOPS 0x00020000 +#define WMI_PEER_STATIC_MIMOPS 0x00040000 +#define WMI_PEER_SPATIAL_MUX 0x00200000 +#define WMI_PEER_TWT_REQ 0x00400000 +#define WMI_PEER_TWT_RESP 0x00800000 +#define WMI_PEER_VHT 0x02000000 +#define WMI_PEER_80MHZ 0x04000000 +#define WMI_PEER_PMF 0x08000000 +/* TODO: Place holder for WLAN_PEER_F_PS_PRESEND_REQUIRED = 0x10000000. 
+ * Need to be cleaned up + */ +#define WMI_PEER_IS_P2P_CAPABLE 0x20000000 +#define WMI_PEER_160MHZ 0x40000000 +#define WMI_PEER_SAFEMODE_EN 0x80000000 + +struct ath12k_wmi_vht_rate_set_params { + __le32 tlv_header; + __le32 rx_max_rate; + __le32 rx_mcs_set; + __le32 tx_max_rate; + __le32 tx_mcs_set; + __le32 tx_max_mcs_nss; +} __packed; + +struct ath12k_wmi_he_rate_set_params { + __le32 tlv_header; + __le32 rx_mcs_set; + __le32 tx_mcs_set; +} __packed; + +#define MAX_REG_RULES 10 +#define REG_ALPHA2_LEN 2 +#define MAX_6G_REG_RULES 5 +#define REG_US_5G_NUM_REG_RULES 4 + +enum wmi_start_event_param { + WMI_VDEV_START_RESP_EVENT = 0, + WMI_VDEV_RESTART_RESP_EVENT, +}; + +struct wmi_vdev_start_resp_event { + __le32 vdev_id; + __le32 requestor_id; + /* enum wmi_start_event_param */ + __le32 resp_type; + __le32 status; + __le32 chain_mask; + __le32 smps_mode; + union { + __le32 mac_id; + __le32 pdev_id; + }; + __le32 cfgd_tx_streams; + __le32 cfgd_rx_streams; +} __packed; + +/* VDEV start response status codes */ +enum wmi_vdev_start_resp_status_code { + WMI_VDEV_START_RESPONSE_STATUS_SUCCESS = 0, + WMI_VDEV_START_RESPONSE_INVALID_VDEVID = 1, + WMI_VDEV_START_RESPONSE_NOT_SUPPORTED = 2, + WMI_VDEV_START_RESPONSE_DFS_VIOLATION = 3, + WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN = 4, +}; + +enum wmi_reg_6g_ap_type { + WMI_REG_INDOOR_AP = 0, + WMI_REG_STD_POWER_AP = 1, + WMI_REG_VLP_AP = 2, + WMI_REG_CURRENT_MAX_AP_TYPE, + WMI_REG_MAX_SUPP_AP_TYPE = WMI_REG_VLP_AP, + WMI_REG_MAX_AP_TYPE = 7, +}; + +enum wmi_reg_6g_client_type { + WMI_REG_DEFAULT_CLIENT = 0, + WMI_REG_SUBORDINATE_CLIENT = 1, + WMI_REG_MAX_CLIENT_TYPE = 2, +}; + +/* Regulatory Rule Flags Passed by FW */ +#define REGULATORY_CHAN_DISABLED BIT(0) +#define REGULATORY_CHAN_NO_IR BIT(1) +#define REGULATORY_CHAN_RADAR BIT(3) +#define REGULATORY_CHAN_NO_OFDM BIT(6) +#define REGULATORY_CHAN_INDOOR_ONLY BIT(9) + +#define REGULATORY_CHAN_NO_HT40 BIT(4) +#define REGULATORY_CHAN_NO_80MHZ BIT(7) +#define REGULATORY_CHAN_NO_160MHZ BIT(8) +#define REGULATORY_CHAN_NO_20MHZ BIT(11) +#define REGULATORY_CHAN_NO_10MHZ BIT(12) + +enum { + WMI_REG_SET_CC_STATUS_PASS = 0, + WMI_REG_CURRENT_ALPHA2_NOT_FOUND = 1, + WMI_REG_INIT_ALPHA2_NOT_FOUND = 2, + WMI_REG_SET_CC_CHANGE_NOT_ALLOWED = 3, + WMI_REG_SET_CC_STATUS_NO_MEMORY = 4, + WMI_REG_SET_CC_STATUS_FAIL = 5, +}; + +#define WMI_REG_CLIENT_MAX 4 + +struct wmi_reg_chan_list_cc_ext_event { + __le32 status_code; + __le32 phy_id; + __le32 alpha2; + __le32 num_phy; + __le32 country_id; + __le32 domain_code; + __le32 dfs_region; + __le32 phybitmap; + __le32 min_bw_2g; + __le32 max_bw_2g; + __le32 min_bw_5g; + __le32 max_bw_5g; + __le32 num_2g_reg_rules; + __le32 num_5g_reg_rules; + __le32 client_type; + __le32 rnr_tpe_usable; + __le32 unspecified_ap_usable; + __le32 domain_code_6g_ap_lpi; + __le32 domain_code_6g_ap_sp; + __le32 domain_code_6g_ap_vlp; + __le32 domain_code_6g_client_lpi[WMI_REG_CLIENT_MAX]; + __le32 domain_code_6g_client_sp[WMI_REG_CLIENT_MAX]; + __le32 domain_code_6g_client_vlp[WMI_REG_CLIENT_MAX]; + __le32 domain_code_6g_super_id; + __le32 min_bw_6g_ap_sp; + __le32 max_bw_6g_ap_sp; + __le32 min_bw_6g_ap_lpi; + __le32 max_bw_6g_ap_lpi; + __le32 min_bw_6g_ap_vlp; + __le32 max_bw_6g_ap_vlp; + __le32 min_bw_6g_client_sp[WMI_REG_CLIENT_MAX]; + __le32 max_bw_6g_client_sp[WMI_REG_CLIENT_MAX]; + __le32 min_bw_6g_client_lpi[WMI_REG_CLIENT_MAX]; + __le32 max_bw_6g_client_lpi[WMI_REG_CLIENT_MAX]; + __le32 min_bw_6g_client_vlp[WMI_REG_CLIENT_MAX]; + __le32 max_bw_6g_client_vlp[WMI_REG_CLIENT_MAX]; + __le32 
num_6g_reg_rules_ap_sp; + __le32 num_6g_reg_rules_ap_lpi; + __le32 num_6g_reg_rules_ap_vlp; + __le32 num_6g_reg_rules_cl_sp[WMI_REG_CLIENT_MAX]; + __le32 num_6g_reg_rules_cl_lpi[WMI_REG_CLIENT_MAX]; + __le32 num_6g_reg_rules_cl_vlp[WMI_REG_CLIENT_MAX]; +} __packed; + +struct ath12k_wmi_reg_rule_ext_params { + __le32 tlv_header; + __le32 freq_info; + __le32 bw_pwr_info; + __le32 flag_info; + __le32 psd_power_info; +} __packed; + +struct wmi_vdev_delete_resp_event { + __le32 vdev_id; +} __packed; + +struct wmi_peer_delete_resp_event { + __le32 vdev_id; + struct ath12k_wmi_mac_addr_params peer_macaddr; +} __packed; + +struct wmi_bcn_tx_status_event { + __le32 vdev_id; + __le32 tx_status; +} __packed; + +struct wmi_vdev_stopped_event { + __le32 vdev_id; +} __packed; + +struct wmi_pdev_bss_chan_info_event { + __le32 pdev_id; + __le32 freq; /* Units in MHz */ + __le32 noise_floor; /* units are dBm */ + /* rx clear - how often the channel was unused */ + __le32 rx_clear_count_low; + __le32 rx_clear_count_high; + /* cycle count - elapsed time during measured period, in clock ticks */ + __le32 cycle_count_low; + __le32 cycle_count_high; + /* tx cycle count - elapsed time spent in tx, in clock ticks */ + __le32 tx_cycle_count_low; + __le32 tx_cycle_count_high; + /* rx cycle count - elapsed time spent in rx, in clock ticks */ + __le32 rx_cycle_count_low; + __le32 rx_cycle_count_high; + /* rx cycle count for my bss, in 64-bit format */ + __le32 rx_bss_cycle_count_low; + __le32 rx_bss_cycle_count_high; +} __packed; + +#define WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS 0 + +struct wmi_vdev_install_key_compl_event { + __le32 vdev_id; + struct ath12k_wmi_mac_addr_params peer_macaddr; + __le32 key_idx; + __le32 key_flags; + __le32 status; +} __packed; + +struct wmi_vdev_install_key_complete_arg { + u32 vdev_id; + const u8 *macaddr; + u32 key_idx; + u32 key_flags; + u32 status; +}; + +struct wmi_peer_assoc_conf_event { + __le32 vdev_id; + struct ath12k_wmi_mac_addr_params peer_macaddr; +} __packed; + +struct wmi_peer_assoc_conf_arg { + u32 vdev_id; + const u8 *macaddr; +}; + +struct wmi_fils_discovery_event { + __le32 vdev_id; + __le32 fils_tt; + __le32 tbtt; +} __packed; + +struct wmi_probe_resp_tx_status_event { + __le32 vdev_id; + __le32 tx_status; +} __packed; + +struct wmi_pdev_ctl_failsafe_chk_event { + __le32 pdev_id; + __le32 ctl_failsafe_status; +} __packed; + +struct ath12k_wmi_pdev_csa_event { + __le32 pdev_id; + __le32 current_switch_count; + __le32 num_vdevs; +} __packed; + +struct ath12k_wmi_pdev_radar_event { + __le32 pdev_id; + __le32 detection_mode; + __le32 chan_freq; + __le32 chan_width; + __le32 detector_id; + __le32 segment_id; + __le32 timestamp; + __le32 is_chirp; + a_sle32 freq_offset; + a_sle32 sidx; +} __packed; + +struct wmi_pdev_temperature_event { + /* temperature value in degrees Celsius */ + a_sle32 temp; + __le32 pdev_id; +} __packed; + +#define WMI_RX_STATUS_OK 0x00 +#define WMI_RX_STATUS_ERR_CRC 0x01 +#define WMI_RX_STATUS_ERR_DECRYPT 0x08 +#define WMI_RX_STATUS_ERR_MIC 0x10 +#define WMI_RX_STATUS_ERR_KEY_CACHE_MISS 0x20 + +#define WLAN_MGMT_TXRX_HOST_MAX_ANTENNA 4 + +struct ath12k_wmi_mgmt_rx_arg { + u32 chan_freq; + u32 channel; + u32 snr; + u8 rssi_ctl[WLAN_MGMT_TXRX_HOST_MAX_ANTENNA]; + u32 rate; + enum wmi_phy_mode phy_mode; + u32 buf_len; + int status; + u32 flags; + int rssi; + u32 tsf_delta; + u8 pdev_id; +}; + +#define ATH_MAX_ANTENNA 4 + +struct ath12k_wmi_mgmt_rx_params { + __le32 channel; + __le32 snr; + __le32 rate; + __le32 phy_mode; + __le32 buf_len; + __le32 status; + __le32 rssi_ctl[ATH_MAX_ANTENNA]; + __le32 flags; + a_sle32 rssi; + __le32 tsf_delta; + __le32 rx_tsf_l32; + __le32 rx_tsf_u32; + __le32 pdev_id; + __le32 chan_freq; +} __packed;
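The receive TSF in ath12k_wmi_mgmt_rx_params arrives split across rx_tsf_l32/rx_tsf_u32, and the per-antenna RSSI values are full __le32 words that the host narrows into the u8 array of ath12k_wmi_mgmt_rx_arg. A sketch of that conversion is below, assuming an a_sle32_to_cpu() helper paired with the a_sle32 type used in this hunk.

/* Sketch only: condense the firmware mgmt-rx TLV into the host-side arg.
 * a_sle32_to_cpu() is assumed to be the signed LE helper for a_sle32.
 */
static void ath12k_wmi_mgmt_rx_to_arg(const struct ath12k_wmi_mgmt_rx_params *ev,
				      struct ath12k_wmi_mgmt_rx_arg *arg)
{
	int i;

	arg->chan_freq = le32_to_cpu(ev->chan_freq);
	arg->channel = le32_to_cpu(ev->channel);
	arg->snr = le32_to_cpu(ev->snr);
	arg->rate = le32_to_cpu(ev->rate);
	arg->phy_mode = le32_to_cpu(ev->phy_mode);
	arg->buf_len = le32_to_cpu(ev->buf_len);
	arg->status = le32_to_cpu(ev->status);
	arg->flags = le32_to_cpu(ev->flags);
	arg->rssi = a_sle32_to_cpu(ev->rssi);
	arg->tsf_delta = le32_to_cpu(ev->tsf_delta);
	arg->pdev_id = le32_to_cpu(ev->pdev_id);

	/* only the low byte of each antenna word is meaningful here */
	for (i = 0; i < WLAN_MGMT_TXRX_HOST_MAX_ANTENNA; i++)
		arg->rssi_ctl[i] = le32_to_cpu(ev->rssi_ctl[i]);
}

/* The split 64-bit receive TSF is reassembled from the two halves */
static u64 ath12k_wmi_mgmt_rx_tsf(const struct ath12k_wmi_mgmt_rx_params *ev)
{
	return ((u64)le32_to_cpu(ev->rx_tsf_u32) << 32) |
	       le32_to_cpu(ev->rx_tsf_l32);
}

A caller would typically drop the frame when status carries WMI_RX_STATUS_ERR_DECRYPT or WMI_RX_STATUS_ERR_MIC rather than passing it up to mac80211.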
+ +#define MAX_ANTENNA_EIGHT 8 + +struct wmi_mgmt_tx_compl_event { + __le32 desc_id; + __le32 status; + __le32 pdev_id; +} __packed; + +struct wmi_scan_event { + __le32 event_type; /* %WMI_SCAN_EVENT_ */ + __le32 reason; /* %WMI_SCAN_REASON_ */ + __le32 channel_freq; /* only valid for WMI_SCAN_EVENT_FOREIGN_CHANNEL */ + __le32 scan_req_id; + __le32 scan_id; + __le32 vdev_id; + /* TSF Timestamp when the scan event (%WMI_SCAN_EVENT_) is completed + * In case of AP it is TSF of the AP vdev + * In case of STA connected state, this is the TSF of the AP + * In case of STA not connected, it will be the free running HW timer + */ + __le32 tsf_timestamp; +} __packed; + +struct wmi_peer_sta_kickout_arg { + const u8 *mac_addr; +}; + +struct wmi_peer_sta_kickout_event { + struct ath12k_wmi_mac_addr_params peer_macaddr; +} __packed; + +enum wmi_roam_reason { + WMI_ROAM_REASON_BETTER_AP = 1, + WMI_ROAM_REASON_BEACON_MISS = 2, + WMI_ROAM_REASON_LOW_RSSI = 3, + WMI_ROAM_REASON_SUITABLE_AP_FOUND = 4, + WMI_ROAM_REASON_HO_FAILED = 5, + + /* keep last */ + WMI_ROAM_REASON_MAX, +}; + +struct wmi_roam_event { + __le32 vdev_id; + __le32 reason; + __le32 rssi; +} __packed; + +#define WMI_CHAN_INFO_START_RESP 0 +#define WMI_CHAN_INFO_END_RESP 1 + +struct wmi_chan_info_event { + __le32 err_code; + __le32 freq; + __le32 cmd_flags; + __le32 noise_floor; + __le32 rx_clear_count; + __le32 cycle_count; + __le32 chan_tx_pwr_range; + __le32 chan_tx_pwr_tp; + __le32 rx_frame_count; + __le32 my_bss_rx_cycle_count; + __le32 rx_11b_mode_data_duration; + __le32 tx_frame_cnt; + __le32 mac_clk_mhz; + __le32 vdev_id; +} __packed; + +struct ath12k_wmi_target_cap_arg { + u32 phy_capability; + u32 max_frag_entry; + u32 num_rf_chains; + u32 ht_cap_info; + u32 vht_cap_info; + u32 vht_supp_mcs; + u32 hw_min_tx_power; + u32 hw_max_tx_power; + u32 sys_cap_info; + u32 min_pkt_size_enable; + u32 max_bcn_ie_size; + u32 max_num_scan_channels; + u32 max_supported_macs; + u32 wmi_fw_sub_feat_caps; + u32 txrx_chainmask; + u32 default_dbs_hw_mode_index; + u32 num_msdu_desc; +}; + +enum wmi_vdev_type { + WMI_VDEV_TYPE_AP = 1, + WMI_VDEV_TYPE_STA = 2, + WMI_VDEV_TYPE_IBSS = 3, + WMI_VDEV_TYPE_MONITOR = 4, +}; + +enum wmi_vdev_subtype { + WMI_VDEV_SUBTYPE_NONE, + WMI_VDEV_SUBTYPE_P2P_DEVICE, + WMI_VDEV_SUBTYPE_P2P_CLIENT, + WMI_VDEV_SUBTYPE_P2P_GO, + WMI_VDEV_SUBTYPE_PROXY_STA, + WMI_VDEV_SUBTYPE_MESH_NON_11S, + WMI_VDEV_SUBTYPE_MESH_11S, +}; + +enum wmi_sta_powersave_param { + WMI_STA_PS_PARAM_RX_WAKE_POLICY = 0, + WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD = 1, + WMI_STA_PS_PARAM_PSPOLL_COUNT = 2, + WMI_STA_PS_PARAM_INACTIVITY_TIME = 3, + WMI_STA_PS_PARAM_UAPSD = 4, +}; + +enum wmi_sta_ps_param_uapsd { + WMI_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0), + WMI_STA_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1), + WMI_STA_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2), + WMI_STA_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3), + WMI_STA_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4), + WMI_STA_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5), + WMI_STA_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6), + WMI_STA_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7), +}; + +enum wmi_sta_ps_param_tx_wake_threshold { + WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER = 0, + WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS = 1, + + /* Values greater than one specify how many TX attempts are made per + * beacon interval before the STA wakes up. + */ +};
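Each of these STA powersave knobs is programmed individually through the wmi_sta_powersave_param_cmd defined earlier in this hunk. A sketch of such a setter follows; the ath12k_wmi_pdev handle, ath12k_wmi_alloc_skb()/ath12k_wmi_cmd_send(), ath12k_wmi_tlv_cmd_hdr() and the tag/command IDs follow the driver's naming elsewhere but are assumptions in this context.

/* Sketch only: program one STA powersave parameter for a vdev. The wmi
 * handle type, skb helpers and WMI_TAG_/WMI_..._CMDID names are assumed.
 */
static int ath12k_wmi_set_sta_ps_param(struct ath12k_wmi_pdev *wmi,
				       u32 vdev_id, u32 param, u32 value)
{
	struct wmi_sta_powersave_param_cmd *cmd;
	struct sk_buff *skb;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_PARAM_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->param = cpu_to_le32(param);
	cmd->value = cpu_to_le32(value);

	return ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
}

For instance, waking on every pending frame would be ath12k_wmi_set_sta_ps_param(wmi, vdev_id, WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD, WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS).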
+ +/* The maximum number of PS-Poll frames the FW will send in response to + * traffic advertised in TIM before waking up (by sending a null frame with PS + * = 0). Value 0 has a special meaning: there is no maximum count and the FW + * will send as many PS-Poll frames as are necessary to retrieve the buffered + * BUs. This parameter is used when the RX wake policy is + * WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD and ignored when the RX wake + * policy is WMI_STA_PS_RX_WAKE_POLICY_WAKE. + */ +enum wmi_sta_ps_param_pspoll_count { + WMI_STA_PS_PSPOLL_COUNT_NO_MAX = 0, + /* Values greater than 0 indicate the maximum number of PS-Poll frames + * the FW will send before waking up. + */ +}; + +/* U-APSD configuration of peer station from (re)assoc request and TSPECs */ +enum wmi_ap_ps_param_uapsd { + WMI_AP_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0), + WMI_AP_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1), + WMI_AP_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2), + WMI_AP_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3), + WMI_AP_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4), + WMI_AP_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5), + WMI_AP_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6), + WMI_AP_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7), +}; + +/* U-APSD maximum service period of peer station */ +enum wmi_ap_ps_peer_param_max_sp { + WMI_AP_PS_PEER_PARAM_MAX_SP_UNLIMITED = 0, + WMI_AP_PS_PEER_PARAM_MAX_SP_2 = 1, + WMI_AP_PS_PEER_PARAM_MAX_SP_4 = 2, + WMI_AP_PS_PEER_PARAM_MAX_SP_6 = 3, + MAX_WMI_AP_PS_PEER_PARAM_MAX_SP, +}; + +enum wmi_ap_ps_peer_param { + /** Set uapsd configuration for a given peer. + * + * This includes the delivery and trigger enabled state for each AC. + * The host MLME needs to set this based on AP capability and the + * station's request as set in the (re)association request received + * from the station. + * + * Lower 8 bits of the value specify the UAPSD configuration. + * + * (see enum wmi_ap_ps_param_uapsd) + * The default value is 0. + */ + WMI_AP_PS_PEER_PARAM_UAPSD = 0, + + /** + * Set the service period for a UAPSD capable station + * + * The service period from the wme ie in the (re)assoc request frame. + * + * (see enum wmi_ap_ps_peer_param_max_sp) + */ + WMI_AP_PS_PEER_PARAM_MAX_SP = 1, + + /** Time in seconds for aging out buffered frames + * for STA in power save + */ + WMI_AP_PS_PEER_PARAM_AGEOUT_TIME = 2, + + /** Specify frame types that are considered SIFS + * RESP trigger frame + */ + WMI_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE = 3, + + /** Specifies the trigger state of the TID. + * Valid only for the UAPSD frame type + */ + WMI_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD = 4, + + /* Specifies the WNM sleep state of a STA */ + WMI_AP_PS_PEER_PARAM_WNM_SLEEP = 5, +};
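For WMI_AP_PS_PEER_PARAM_UAPSD the lower byte of the value sent through wmi_ap_ps_peer_cmd is built from the WMI_AP_PS_UAPSD_AC* bits above. A sketch of deriving it from the AC bitmap a station advertised in its (re)assoc request is shown here, using the usual QoS-info ordering (BIT(0) = VO through BIT(3) = BE); the helper name is illustrative.

/* Sketch only: translate a station's U-APSD AC bitmap into the value for
 * WMI_AP_PS_PEER_PARAM_UAPSD. The AC-to-bit mapping follows the QoS-info
 * field ordering and is an assumption here.
 */
static u32 ath12k_wmi_peer_uapsd_value(u8 uapsd_queues)
{
	u32 uapsd = 0;

	if (uapsd_queues & BIT(0)) /* AC_VO */
		uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
			 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
	if (uapsd_queues & BIT(1)) /* AC_VI */
		uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
			 WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
	if (uapsd_queues & BIT(2)) /* AC_BK */
		uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
			 WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
	if (uapsd_queues & BIT(3)) /* AC_BE */
		uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
			 WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;

	return uapsd;
}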
+ +#define DISABLE_SIFS_RESPONSE_TRIGGER 0 + +#define WMI_MAX_KEY_INDEX 3 +#define WMI_MAX_KEY_LEN 32 + +enum wmi_key_type { + WMI_KEY_PAIRWISE = 0, + WMI_KEY_GROUP = 1, +}; + +enum wmi_cipher_type { + WMI_CIPHER_NONE = 0, /* clear key */ + WMI_CIPHER_WEP = 1, + WMI_CIPHER_TKIP = 2, + WMI_CIPHER_AES_OCB = 3, + WMI_CIPHER_AES_CCM = 4, + WMI_CIPHER_WAPI = 5, + WMI_CIPHER_CKIP = 6, + WMI_CIPHER_AES_CMAC = 7, + WMI_CIPHER_ANY = 8, + WMI_CIPHER_AES_GCM = 9, + WMI_CIPHER_AES_GMAC = 10, +}; + +/* Value to disable fixed rate setting */ +#define WMI_FIXED_RATE_NONE (0xffff) + +#define ATH12K_RC_VERSION_OFFSET 28 +#define ATH12K_RC_PREAMBLE_OFFSET 8 +#define ATH12K_RC_NSS_OFFSET 5 + +#define ATH12K_HW_RATE_CODE(rate, nss, preamble) \ + ((1 << ATH12K_RC_VERSION_OFFSET) | \ + ((nss) << ATH12K_RC_NSS_OFFSET) | \ + ((preamble) << ATH12K_RC_PREAMBLE_OFFSET) | \ + (rate)) + +/* Preamble types to be used with VDEV fixed rate configuration */ +enum wmi_rate_preamble { + WMI_RATE_PREAMBLE_OFDM, + WMI_RATE_PREAMBLE_CCK, + WMI_RATE_PREAMBLE_HT, + WMI_RATE_PREAMBLE_VHT, + WMI_RATE_PREAMBLE_HE, +}; + +/** + * enum wmi_rtscts_prot_mode - Enable/Disable RTS/CTS and CTS2Self Protection. + * @WMI_RTS_CTS_DISABLED: RTS/CTS protection is disabled. + * @WMI_USE_RTS_CTS: RTS/CTS Enabled. + * @WMI_USE_CTS2SELF: CTS to self protection Enabled. + */ +enum wmi_rtscts_prot_mode { + WMI_RTS_CTS_DISABLED = 0, + WMI_USE_RTS_CTS = 1, + WMI_USE_CTS2SELF = 2, +}; + +/** + * enum wmi_rtscts_profile - Selection of RTS CTS profile along with enabling + * protection mode. + * @WMI_RTSCTS_FOR_NO_RATESERIES: Neither rate series should use RTS-CTS + * @WMI_RTSCTS_FOR_SECOND_RATESERIES: Only the second rate series will use RTS-CTS + * @WMI_RTSCTS_ACROSS_SW_RETRIES: Only the second rate series will use RTS-CTS, + * but if there's a sw retry, both the rate + * series will use RTS-CTS. + * @WMI_RTSCTS_ERP: RTS/CTS used for ERP protection for every PPDU. + * @WMI_RTSCTS_FOR_ALL_RATESERIES: Enable RTS-CTS for all rate series. + */ +enum wmi_rtscts_profile { + WMI_RTSCTS_FOR_NO_RATESERIES = 0, + WMI_RTSCTS_FOR_SECOND_RATESERIES = 1, + WMI_RTSCTS_ACROSS_SW_RETRIES = 2, + WMI_RTSCTS_ERP = 3, + WMI_RTSCTS_FOR_ALL_RATESERIES = 4, +}; + +#define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr) + +enum wmi_sta_ps_param_rx_wake_policy { + WMI_STA_PS_RX_WAKE_POLICY_WAKE = 0, + WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD = 1, +}; + +/* Do not change existing values! Used by the ath12k_frame_mode + * module parameter.
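As a worked example of the rate-code packing defined above, assume rate index 3, an nss field of 1 and a VHT preamble (how the firmware interprets the nss argument is not spelled out in this header):

/* (1 << 28) | (1 << 5) | (3 << 8) | 3 == 0x10000323 */
static inline u32 example_vht_rate_code(void)
{
	return ATH12K_HW_RATE_CODE(3, 1, WMI_RATE_PREAMBLE_VHT);
}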
+ */ +enum ath12k_hw_txrx_mode { + ATH12K_HW_TXRX_RAW = 0, + ATH12K_HW_TXRX_NATIVE_WIFI = 1, + ATH12K_HW_TXRX_ETHERNET = 2, +}; + +struct wmi_wmm_params { + __le32 tlv_header; + __le32 cwmin; + __le32 cwmax; + __le32 aifs; + __le32 txoplimit; + __le32 acm; + __le32 no_ack; +} __packed; + +struct wmi_wmm_params_arg { + u8 acm; + u8 aifs; + u16 cwmin; + u16 cwmax; + u16 txop; + u8 no_ack; +}; + +struct wmi_vdev_set_wmm_params_cmd { + __le32 tlv_header; + __le32 vdev_id; + struct wmi_wmm_params wmm_params[4]; + __le32 wmm_param_type; +} __packed; + +struct wmi_wmm_params_all_arg { + struct wmi_wmm_params_arg ac_be; + struct wmi_wmm_params_arg ac_bk; + struct wmi_wmm_params_arg ac_vi; + struct wmi_wmm_params_arg ac_vo; +}; + +#define ATH12K_TWT_DEF_STA_CONG_TIMER_MS 5000 +#define ATH12K_TWT_DEF_DEFAULT_SLOT_SIZE 10 +#define ATH12K_TWT_DEF_CONGESTION_THRESH_SETUP 50 +#define ATH12K_TWT_DEF_CONGESTION_THRESH_TEARDOWN 20 +#define ATH12K_TWT_DEF_CONGESTION_THRESH_CRITICAL 100 +#define ATH12K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN 80 +#define ATH12K_TWT_DEF_INTERFERENCE_THRESH_SETUP 50 +#define ATH12K_TWT_DEF_MIN_NO_STA_SETUP 10 +#define ATH12K_TWT_DEF_MIN_NO_STA_TEARDOWN 2 +#define ATH12K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS 2 +#define ATH12K_TWT_DEF_MIN_NO_TWT_SLOTS 2 +#define ATH12K_TWT_DEF_MAX_NO_STA_TWT 500 +#define ATH12K_TWT_DEF_MODE_CHECK_INTERVAL 10000 +#define ATH12K_TWT_DEF_ADD_STA_SLOT_INTERVAL 1000 +#define ATH12K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL 5000 + +struct wmi_twt_enable_params_cmd { + __le32 tlv_header; + __le32 pdev_id; + __le32 sta_cong_timer_ms; + __le32 mbss_support; + __le32 default_slot_size; + __le32 congestion_thresh_setup; + __le32 congestion_thresh_teardown; + __le32 congestion_thresh_critical; + __le32 interference_thresh_teardown; + __le32 interference_thresh_setup; + __le32 min_no_sta_setup; + __le32 min_no_sta_teardown; + __le32 no_of_bcast_mcast_slots; + __le32 min_no_twt_slots; + __le32 max_no_sta_twt; + __le32 mode_check_interval; + __le32 add_sta_slot_interval; + __le32 remove_sta_slot_interval; +} __packed; + +struct wmi_twt_disable_params_cmd { + __le32 tlv_header; + __le32 pdev_id; +} __packed; + +struct wmi_obss_spatial_reuse_params_cmd { + __le32 tlv_header; + __le32 pdev_id; + __le32 enable; + a_sle32 obss_min; + a_sle32 obss_max; + __le32 vdev_id; +} __packed; + +#define ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS 200 +#define ATH12K_OBSS_COLOR_COLLISION_DETECTION_DISABLE 0 +#define ATH12K_OBSS_COLOR_COLLISION_DETECTION 1 + +#define ATH12K_BSS_COLOR_STA_PERIODS 10000 +#define ATH12K_BSS_COLOR_AP_PERIODS 5000 + +struct wmi_obss_color_collision_cfg_params_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 flags; + __le32 evt_type; + __le32 current_bss_color; + __le32 detection_period_ms; + __le32 scan_period_ms; + __le32 free_slot_expiry_time_ms; +} __packed; + +struct wmi_bss_color_change_enable_params_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 enable; +} __packed; + +#define ATH12K_IPV4_TH_SEED_SIZE 5 +#define ATH12K_IPV6_TH_SEED_SIZE 11 + +struct ath12k_wmi_pdev_lro_config_cmd { + __le32 tlv_header; + __le32 lro_enable; + __le32 res; + u32 th_4[ATH12K_IPV4_TH_SEED_SIZE]; + u32 th_6[ATH12K_IPV6_TH_SEED_SIZE]; + __le32 pdev_id; +} __packed; + +#define ATH12K_WMI_SPECTRAL_COUNT_DEFAULT 0 +#define ATH12K_WMI_SPECTRAL_PERIOD_DEFAULT 224 +#define ATH12K_WMI_SPECTRAL_PRIORITY_DEFAULT 1 +#define ATH12K_WMI_SPECTRAL_FFT_SIZE_DEFAULT 7 +#define ATH12K_WMI_SPECTRAL_GC_ENA_DEFAULT 1 +#define ATH12K_WMI_SPECTRAL_RESTART_ENA_DEFAULT 0 +#define 
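For context, the ATH12K_TWT_DEF_* values above are the natural defaults for wmi_twt_enable_params_cmd. A sketch of how a sender might populate a few of the fixed fields; TLV header setup is omitted, and this is not necessarily how ath12k_wmi_send_twt_enable_cmd is implemented:

static void example_twt_enable_fill(struct wmi_twt_enable_params_cmd *cmd,
				    u32 pdev_id)
{
	cmd->pdev_id = cpu_to_le32(pdev_id);
	cmd->sta_cong_timer_ms = cpu_to_le32(ATH12K_TWT_DEF_STA_CONG_TIMER_MS);
	cmd->default_slot_size = cpu_to_le32(ATH12K_TWT_DEF_DEFAULT_SLOT_SIZE);
	cmd->max_no_sta_twt = cpu_to_le32(ATH12K_TWT_DEF_MAX_NO_STA_TWT);
	/* the remaining threshold/slot/interval fields pair up with their
	 * ATH12K_TWT_DEF_* counterparts in the same way
	 */
}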
ATH12K_WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT -96 +#define ATH12K_WMI_SPECTRAL_INIT_DELAY_DEFAULT 80 +#define ATH12K_WMI_SPECTRAL_NB_TONE_THR_DEFAULT 12 +#define ATH12K_WMI_SPECTRAL_STR_BIN_THR_DEFAULT 8 +#define ATH12K_WMI_SPECTRAL_WB_RPT_MODE_DEFAULT 0 +#define ATH12K_WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT 0 +#define ATH12K_WMI_SPECTRAL_RSSI_THR_DEFAULT 0xf0 +#define ATH12K_WMI_SPECTRAL_PWR_FORMAT_DEFAULT 0 +#define ATH12K_WMI_SPECTRAL_RPT_MODE_DEFAULT 2 +#define ATH12K_WMI_SPECTRAL_BIN_SCALE_DEFAULT 1 +#define ATH12K_WMI_SPECTRAL_DBM_ADJ_DEFAULT 1 +#define ATH12K_WMI_SPECTRAL_CHN_MASK_DEFAULT 1 + +struct ath12k_wmi_vdev_spectral_conf_arg { + u32 vdev_id; + u32 scan_count; + u32 scan_period; + u32 scan_priority; + u32 scan_fft_size; + u32 scan_gc_ena; + u32 scan_restart_ena; + u32 scan_noise_floor_ref; + u32 scan_init_delay; + u32 scan_nb_tone_thr; + u32 scan_str_bin_thr; + u32 scan_wb_rpt_mode; + u32 scan_rssi_rpt_mode; + u32 scan_rssi_thr; + u32 scan_pwr_format; + u32 scan_rpt_mode; + u32 scan_bin_scale; + u32 scan_dbm_adj; + u32 scan_chn_mask; +}; + +struct ath12k_wmi_vdev_spectral_conf_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 scan_count; + __le32 scan_period; + __le32 scan_priority; + __le32 scan_fft_size; + __le32 scan_gc_ena; + __le32 scan_restart_ena; + __le32 scan_noise_floor_ref; + __le32 scan_init_delay; + __le32 scan_nb_tone_thr; + __le32 scan_str_bin_thr; + __le32 scan_wb_rpt_mode; + __le32 scan_rssi_rpt_mode; + __le32 scan_rssi_thr; + __le32 scan_pwr_format; + __le32 scan_rpt_mode; + __le32 scan_bin_scale; + __le32 scan_dbm_adj; + __le32 scan_chn_mask; +} __packed; + +#define ATH12K_WMI_SPECTRAL_TRIGGER_CMD_TRIGGER 1 +#define ATH12K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR 2 +#define ATH12K_WMI_SPECTRAL_ENABLE_CMD_ENABLE 1 +#define ATH12K_WMI_SPECTRAL_ENABLE_CMD_DISABLE 2 + +struct ath12k_wmi_vdev_spectral_enable_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 trigger_cmd; + __le32 enable_cmd; +} __packed; + +struct ath12k_wmi_pdev_dma_ring_cfg_arg { + u32 tlv_header; + u32 pdev_id; + u32 module_id; + u32 base_paddr_lo; + u32 base_paddr_hi; + u32 head_idx_paddr_lo; + u32 head_idx_paddr_hi; + u32 tail_idx_paddr_lo; + u32 tail_idx_paddr_hi; + u32 num_elems; + u32 buf_size; + u32 num_resp_per_event; + u32 event_timeout_ms; +}; + +struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd { + __le32 tlv_header; + __le32 pdev_id; + __le32 module_id; /* see enum wmi_direct_buffer_module */ + __le32 base_paddr_lo; + __le32 base_paddr_hi; + __le32 head_idx_paddr_lo; + __le32 head_idx_paddr_hi; + __le32 tail_idx_paddr_lo; + __le32 tail_idx_paddr_hi; + __le32 num_elems; /* Number of elems in the ring */ + __le32 buf_size; /* size of allocated buffer in bytes */ + + /* Number of wmi_dma_buf_release_entry packed together */ + __le32 num_resp_per_event; + + /* Target should timeout and send whatever resp + * it has if this time expires, units in milliseconds + */ + __le32 event_timeout_ms; +} __packed; + +struct ath12k_wmi_dma_buf_release_fixed_params { + __le32 pdev_id; + __le32 module_id; + __le32 num_buf_release_entry; + __le32 num_meta_data_entry; +} __packed; + +struct ath12k_wmi_dma_buf_release_entry_params { + __le32 tlv_header; + __le32 paddr_lo; + + /* Bits 11:0: address of data + * Bits 31:12: host context data + */ + __le32 paddr_hi; +} __packed; + +#define WMI_SPECTRAL_META_INFO1_FREQ1 GENMASK(15, 0) +#define WMI_SPECTRAL_META_INFO1_FREQ2 GENMASK(31, 16) + +#define WMI_SPECTRAL_META_INFO2_CHN_WIDTH GENMASK(7, 0) + +struct ath12k_wmi_dma_buf_release_meta_data_params { + __le32 tlv_header; 
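The WMI_SPECTRAL_META_INFO* masks above describe packed 32-bit metadata words. A sketch of decoding them with the kernel's bitfield helpers, assuming info1/info2 are the raw words after byte-swapping:

#include <linux/bitfield.h>

static void example_spectral_meta_decode(u32 info1, u32 info2)
{
	u32 freq1 = u32_get_bits(info1, WMI_SPECTRAL_META_INFO1_FREQ1);
	u32 freq2 = u32_get_bits(info1, WMI_SPECTRAL_META_INFO1_FREQ2);
	u32 ch_width = u32_get_bits(info2, WMI_SPECTRAL_META_INFO2_CHN_WIDTH);

	pr_debug("spectral meta: freq1 %u freq2 %u width %u\n",
		 freq1, freq2, ch_width);
}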
+ a_sle32 noise_floor[WMI_MAX_CHAINS]; + __le32 reset_delay; + __le32 freq1; + __le32 freq2; + __le32 ch_width; +} __packed; + +enum wmi_fils_discovery_cmd_type { + WMI_FILS_DISCOVERY_CMD, + WMI_UNSOL_BCAST_PROBE_RESP, +}; + +struct wmi_fils_discovery_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 interval; + __le32 config; /* enum wmi_fils_discovery_cmd_type */ +} __packed; + +struct wmi_fils_discovery_tmpl_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 buf_len; +} __packed; + +struct wmi_probe_tmpl_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 buf_len; +} __packed; + +#define WMI_MAX_MEM_REQS 32 + +#define MAX_RADIOS 3 + +#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ) +#define WMI_SEND_TIMEOUT_HZ (3 * HZ) + +struct ath12k_wmi_pdev { + struct ath12k_wmi_base *wmi_ab; + enum ath12k_htc_ep_id eid; + const struct wmi_peer_flags_map *peer_flags; + u32 rx_decap_mode; +}; + +struct ath12k_wmi_base { + struct ath12k_base *ab; + struct ath12k_wmi_pdev wmi[MAX_RADIOS]; + enum ath12k_htc_ep_id wmi_endpoint_id[MAX_RADIOS]; + u32 max_msg_len[MAX_RADIOS]; + + struct completion service_ready; + struct completion unified_ready; + DECLARE_BITMAP(svc_map, WMI_MAX_EXT_SERVICE); + wait_queue_head_t tx_credits_wq; + const struct wmi_peer_flags_map *peer_flags; + u32 num_mem_chunks; + u32 rx_decap_mode; + struct ath12k_wmi_host_mem_chunk_arg mem_chunks[WMI_MAX_MEM_REQS]; + + enum wmi_host_hw_mode_config_type preferred_hw_mode; + + struct ath12k_wmi_target_cap_arg *targ_cap; +}; + +#define ATH12K_FW_STATS_BUF_SIZE (1024 * 1024) + +void ath12k_wmi_init_qcn9274(struct ath12k_base *ab, + struct ath12k_wmi_resource_config_arg *config); +void ath12k_wmi_init_wcn7850(struct ath12k_base *ab, + struct ath12k_wmi_resource_config_arg *config); +int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb, + u32 cmd_id); +struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_sc, u32 len); +int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id, + struct sk_buff *frame); +int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id, + struct ieee80211_mutable_offsets *offs, + struct sk_buff *bcn); +int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id); +int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid, + const u8 *bssid); +int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id); +int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg, + bool restart); +int ath12k_wmi_set_peer_param(struct ath12k *ar, const u8 *peer_addr, + u32 vdev_id, u32 param_id, u32 param_val); +int ath12k_wmi_pdev_set_param(struct ath12k *ar, u32 param_id, + u32 param_value, u8 pdev_id); +int ath12k_wmi_pdev_set_ps_mode(struct ath12k *ar, int vdev_id, u32 enable); +int ath12k_wmi_wait_for_unified_ready(struct ath12k_base *ab); +int ath12k_wmi_cmd_init(struct ath12k_base *ab); +int ath12k_wmi_wait_for_service_ready(struct ath12k_base *ab); +int ath12k_wmi_connect(struct ath12k_base *ab); +int ath12k_wmi_pdev_attach(struct ath12k_base *ab, + u8 pdev_id); +int ath12k_wmi_attach(struct ath12k_base *ab); +void ath12k_wmi_detach(struct ath12k_base *ab); +int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr, + struct ath12k_wmi_vdev_create_arg *arg); +int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar, + struct ath12k_wmi_peer_create_arg *arg); +int ath12k_wmi_vdev_set_param_cmd(struct ath12k *ar, u32 vdev_id, + u32 param_id, u32 param_value); + +int ath12k_wmi_set_sta_ps_param(struct ath12k *ar, u32 vdev_id, + u32 param, u32 param_value); +int 
ath12k_wmi_force_fw_hang_cmd(struct ath12k *ar, u32 type, u32 delay_time_ms); +int ath12k_wmi_send_peer_delete_cmd(struct ath12k *ar, + const u8 *peer_addr, u8 vdev_id); +int ath12k_wmi_vdev_delete(struct ath12k *ar, u8 vdev_id); +void ath12k_wmi_start_scan_init(struct ath12k *ar, + struct ath12k_wmi_scan_req_arg *arg); +int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar, + struct ath12k_wmi_scan_req_arg *arg); +int ath12k_wmi_send_scan_stop_cmd(struct ath12k *ar, + struct ath12k_wmi_scan_cancel_arg *arg); +int ath12k_wmi_send_wmm_update_cmd(struct ath12k *ar, u32 vdev_id, + struct wmi_wmm_params_all_arg *param); +int ath12k_wmi_pdev_suspend(struct ath12k *ar, u32 suspend_opt, + u32 pdev_id); +int ath12k_wmi_pdev_resume(struct ath12k *ar, u32 pdev_id); + +int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar, + struct ath12k_wmi_peer_assoc_arg *arg); +int ath12k_wmi_vdev_install_key(struct ath12k *ar, + struct wmi_vdev_install_key_arg *arg); +int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar, + enum wmi_bss_chan_info_req_type type); +int ath12k_wmi_send_stats_request_cmd(struct ath12k *ar, u32 stats_id, + u32 vdev_id, u32 pdev_id); +int ath12k_wmi_send_pdev_temperature_cmd(struct ath12k *ar); +int ath12k_wmi_send_peer_flush_tids_cmd(struct ath12k *ar, + u8 peer_addr[ETH_ALEN], + u32 peer_tid_bitmap, + u8 vdev_id); +int ath12k_wmi_send_set_ap_ps_param_cmd(struct ath12k *ar, u8 *peer_addr, + struct ath12k_wmi_ap_ps_arg *arg); +int ath12k_wmi_send_scan_chan_list_cmd(struct ath12k *ar, + struct ath12k_wmi_scan_chan_list_arg *arg); +int ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath12k *ar, + u32 pdev_id); +int ath12k_wmi_addba_clear_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac); +int ath12k_wmi_addba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac, + u32 tid, u32 buf_size); +int ath12k_wmi_addba_set_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac, + u32 tid, u32 status); +int ath12k_wmi_delba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac, + u32 tid, u32 initiator, u32 reason); +int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar, + u32 vdev_id, u32 bcn_ctrl_op); +int ath12k_wmi_send_init_country_cmd(struct ath12k *ar, + struct ath12k_wmi_init_country_arg *arg); +int ath12k_wmi_peer_rx_reorder_queue_setup(struct ath12k *ar, + int vdev_id, const u8 *addr, + dma_addr_t paddr, u8 tid, + u8 ba_window_size_valid, + u32 ba_window_size); +int +ath12k_wmi_rx_reord_queue_remove(struct ath12k *ar, + struct ath12k_wmi_rx_reorder_queue_remove_arg *arg); +int ath12k_wmi_send_pdev_set_regdomain(struct ath12k *ar, + struct ath12k_wmi_pdev_set_regdomain_arg *arg); +int ath12k_wmi_simulate_radar(struct ath12k *ar); +int ath12k_wmi_send_twt_enable_cmd(struct ath12k *ar, u32 pdev_id); +int ath12k_wmi_send_twt_disable_cmd(struct ath12k *ar, u32 pdev_id); +int ath12k_wmi_send_obss_spr_cmd(struct ath12k *ar, u32 vdev_id, + struct ieee80211_he_obss_pd *he_obss_pd); +int ath12k_wmi_obss_color_cfg_cmd(struct ath12k *ar, u32 vdev_id, + u8 bss_color, u32 period, + bool enable); +int ath12k_wmi_send_bss_color_change_enable_cmd(struct ath12k *ar, u32 vdev_id, + bool enable); +int ath12k_wmi_pdev_lro_cfg(struct ath12k *ar, int pdev_id); +int ath12k_wmi_pdev_dma_ring_cfg(struct ath12k *ar, + struct ath12k_wmi_pdev_dma_ring_cfg_arg *arg); +int ath12k_wmi_vdev_spectral_enable(struct ath12k *ar, u32 vdev_id, + u32 trigger, u32 enable); +int ath12k_wmi_vdev_spectral_conf(struct ath12k *ar, + struct ath12k_wmi_vdev_spectral_conf_arg *arg); +int ath12k_wmi_fils_discovery_tmpl(struct 
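For reference on ath12k_wmi_send_wmm_update_cmd() above: callers fill one wmi_wmm_params_arg per access category in wmi_wmm_params_all_arg. A minimal sketch with illustrative AC_BE numbers; real values come from mac80211's tx queue parameters:

static int example_wmm_update(struct ath12k *ar, u32 vdev_id)
{
	struct wmi_wmm_params_all_arg wmm = {};

	wmm.ac_be.aifs = 3;	/* illustrative 802.11 defaults */
	wmm.ac_be.cwmin = 15;
	wmm.ac_be.cwmax = 1023;
	wmm.ac_be.txop = 0;
	/* ac_bk, ac_vi and ac_vo are filled the same way */

	return ath12k_wmi_send_wmm_update_cmd(ar, vdev_id, &wmm);
}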
ath12k *ar, u32 vdev_id, + struct sk_buff *tmpl); +int ath12k_wmi_fils_discovery(struct ath12k *ar, u32 vdev_id, u32 interval, + bool unsol_bcast_probe_resp_enabled); +int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id, + struct sk_buff *tmpl); +int ath12k_wmi_set_hw_mode(struct ath12k_base *ab, + enum wmi_host_hw_mode_config_type mode); + +#endif diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index a20e0aeae284..0c2b8b1a10d5 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -1119,7 +1119,7 @@ void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq, NL80211_CHAN_HT20 : NL80211_CHAN_NO_HT); mutex_lock(&vif->wdev.mtx); - cfg80211_ch_switch_notify(vif->ndev, &chandef, 0); + cfg80211_ch_switch_notify(vif->ndev, &chandef, 0, 0); mutex_unlock(&vif->wdev.mtx); } diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c index 6610d76131fa..7a45f5f62826 100644 --- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c @@ -1277,13 +1277,13 @@ static void ar5008_hw_set_radar_conf(struct ath_hw *ah) static void ar5008_hw_init_txpower_cck(struct ath_hw *ah, int16_t *rate_array) { -#define CCK_DELTA(x) ((OLC_FOR_AR9280_20_LATER) ? max((x) - 2, 0) : (x)) - ah->tx_power[0] = CCK_DELTA(rate_array[rate1l]); - ah->tx_power[1] = CCK_DELTA(min(rate_array[rate2l], +#define CCK_DELTA(_ah, x) ((OLC_FOR_AR9280_20_LATER(_ah)) ? max((x) - 2, 0) : (x)) + ah->tx_power[0] = CCK_DELTA(ah, rate_array[rate1l]); + ah->tx_power[1] = CCK_DELTA(ah, min(rate_array[rate2l], rate_array[rate2s])); - ah->tx_power[2] = CCK_DELTA(min(rate_array[rate5_5l], + ah->tx_power[2] = CCK_DELTA(ah, min(rate_array[rate5_5l], rate_array[rate5_5s])); - ah->tx_power[3] = CCK_DELTA(min(rate_array[rate11l], + ah->tx_power[3] = CCK_DELTA(ah, min(rate_array[rate11l], rate_array[rate11s])); #undef CCK_DELTA } diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c index fd53b5f9e9b5..c8b3f3aaa45b 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c @@ -659,9 +659,9 @@ static void ar9002_hw_pa_cal(struct ath_hw *ah, bool is_reset) static void ar9002_hw_olc_temp_compensation(struct ath_hw *ah) { - if (OLC_FOR_AR9287_10_LATER) + if (OLC_FOR_AR9287_10_LATER(ah)) ar9287_hw_olc_temp_compensation(ah); - else if (OLC_FOR_AR9280_20_LATER) + else if (OLC_FOR_AR9280_20_LATER(ah)) ar9280_hw_olc_temp_compensation(ah); } @@ -672,7 +672,7 @@ static int ar9002_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan, bool nfcal, nfcal_pending = false, percal_pending; int ret; - nfcal = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF); + nfcal = !!(REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) & AR_PHY_AGC_CONTROL_NF); if (ah->caldata) { nfcal_pending = test_bit(NFCAL_PENDING, &ah->caldata->cal_flags); if (longcal) /* Remember to not miss */ @@ -752,11 +752,11 @@ static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan) if (IS_CHAN_HT20(chan)) { REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE); REG_SET_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN); - REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, + REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_FLTR_CAL); REG_CLR_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE); - REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL); - if (!ath9k_hw_wait(ah, 
AR_PHY_AGC_CONTROL, + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_CAL); + if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) { ath_dbg(common, CALIBRATE, "offset calibration failed to complete in %d ms; noisy environment?\n", @@ -768,10 +768,10 @@ static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan) REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE); } REG_CLR_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC); - REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL); + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_FLTR_CAL); REG_SET_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE); - REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL); - if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL, + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_CAL); + if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) { ath_dbg(common, CALIBRATE, "offset calibration failed to complete in %d ms; noisy environment?\n", @@ -781,7 +781,7 @@ static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan) REG_SET_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC); REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE); - REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL); + REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_FLTR_CAL); return true; } @@ -857,17 +857,17 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan) if (!AR_SREV_9287_11_OR_LATER(ah)) REG_CLR_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC); - REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_FLTR_CAL); } /* Calibrate the AGC */ - REG_WRITE(ah, AR_PHY_AGC_CONTROL, - REG_READ(ah, AR_PHY_AGC_CONTROL) | + REG_WRITE(ah, AR_PHY_AGC_CONTROL(ah), + REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) | AR_PHY_AGC_CONTROL_CAL); /* Poll for offset calibration complete */ - if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, + if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) { ath_dbg(common, CALIBRATE, @@ -880,7 +880,7 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan) if (!AR_SREV_9287_11_OR_LATER(ah)) REG_SET_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC); - REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, + REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_FLTR_CAL); } } diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c index ae68f674829b..d08ea0b28530 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c @@ -249,9 +249,9 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah, if (power_off) { /* clear bit 19 to disable L1 */ - REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA); + REG_CLR_BIT(ah, AR_PCIE_PM_CTRL(ah), AR_PCIE_PM_CTRL_ENA); - val = REG_READ(ah, AR_WA); + val = REG_READ(ah, AR_WA(ah)); /* * Set PCIe workaround bits @@ -286,7 +286,7 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah, if (AR_SREV_9285E_20(ah)) val |= AR_WA_BIT23; - REG_WRITE(ah, AR_WA, val); + REG_WRITE(ah, AR_WA(ah), val); } else { if (ah->config.pcie_waen) { val = ah->config.pcie_waen; @@ -314,10 +314,10 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah, if (AR_SREV_9285E_20(ah)) val |= AR_WA_BIT23; - REG_WRITE(ah, AR_WA, val); + REG_WRITE(ah, AR_WA(ah), val); /* set bit 19 to allow forcing of pcie 
core into L1 state */ - REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA); + REG_SET_BIT(ah, AR_PCIE_PM_CTRL(ah), AR_PCIE_PM_CTRL_ENA); } } diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c index a8c0e8e2d78c..b70cd4af1ae0 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c @@ -21,7 +21,7 @@ static void ar9002_hw_rx_enable(struct ath_hw *ah) { - REG_WRITE(ah, AR_CR, AR_CR_RXE); + REG_WRITE(ah, AR_CR, AR_CR_RXE(ah)); } static void ar9002_hw_set_desc_link(void *ds, u32 ds_link) @@ -40,14 +40,14 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked, struct ath_common *common = ath9k_hw_common(ah); if (!AR_SREV_9100(ah)) { - if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) { - if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M) + if (REG_READ(ah, AR_INTR_ASYNC_CAUSE(ah)) & AR_INTR_MAC_IRQ) { + if ((REG_READ(ah, AR_RTC_STATUS(ah)) & AR_RTC_STATUS_M(ah)) == AR_RTC_STATUS_ON) { isr = REG_READ(ah, AR_ISR); } } - sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) & + sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE(ah)) & AR_INTR_SYNC_DEFAULT; *masked = 0; @@ -138,7 +138,7 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked, u32 s5_s; if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) { - s5_s = REG_READ(ah, AR_ISR_S5_S); + s5_s = REG_READ(ah, AR_ISR_S5_S(ah)); } else { s5_s = REG_READ(ah, AR_ISR_S5); } @@ -201,8 +201,8 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked, "AR_INTR_SYNC_LOCAL_TIMEOUT\n"); } - REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause); - (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR); + REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR(ah), sync_cause); + (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR(ah)); } return true; diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c index ebdb97999335..23ac6b7c2cbd 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c @@ -281,10 +281,10 @@ static void ar9002_olc_init(struct ath_hw *ah) { u32 i; - if (!OLC_FOR_AR9280_20_LATER) + if (!OLC_FOR_AR9280_20_LATER(ah)) return; - if (OLC_FOR_AR9287_10_LATER) { + if (OLC_FOR_AR9287_10_LATER(ah)) { REG_SET_BIT(ah, AR_PHY_TX_PWRCTRL9, AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL); ath9k_hw_analog_shift_rmw(ah, AR9287_AN_TXPC0, diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c index 6ca089f15629..2224cb74b1d4 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c @@ -346,14 +346,14 @@ static bool ar9003_hw_dynamic_osdac_selection(struct ath_hw *ah, /* * Clear offset and IQ calibration, run AGC cal. 
*/ - REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, + REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_OFFSET_CAL); - REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0, + REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0(ah), AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL); - REG_WRITE(ah, AR_PHY_AGC_CONTROL, - REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_CAL); + REG_WRITE(ah, AR_PHY_AGC_CONTROL(ah), + REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) | AR_PHY_AGC_CONTROL_CAL); - status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, + status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT); if (!status) { @@ -367,13 +367,13 @@ static bool ar9003_hw_dynamic_osdac_selection(struct ath_hw *ah, * (Carrier Leak calibration, TX Filter calibration and * Peak Detector offset calibration). */ - REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_OFFSET_CAL); REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE); - REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, + REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_FLTR_CAL); - REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, + REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_PKDET_CAL); ch0_done = 0; @@ -387,10 +387,10 @@ static bool ar9003_hw_dynamic_osdac_selection(struct ath_hw *ah, REG_SET_BIT(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN); - REG_WRITE(ah, AR_PHY_AGC_CONTROL, - REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_CAL); + REG_WRITE(ah, AR_PHY_AGC_CONTROL(ah), + REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) | AR_PHY_AGC_CONTROL_CAL); - status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, + status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT); if (!status) { @@ -531,7 +531,7 @@ static bool ar9003_hw_dynamic_osdac_selection(struct ath_hw *ah, } } - REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, + REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_OFFSET_CAL); REG_SET_BIT(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN); @@ -539,7 +539,7 @@ static bool ar9003_hw_dynamic_osdac_selection(struct ath_hw *ah, * We don't need to check txiqcal_done here since it is always * set for AR9550. 
*/ - REG_SET_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0, + REG_SET_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0(ah), AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL); return true; @@ -897,7 +897,7 @@ static void ar9003_hw_tx_iq_cal_outlier_detection(struct ath_hw *ah, memset(tx_corr_coeff, 0, sizeof(tx_corr_coeff)); for (i = 0; i < MAX_MEASUREMENT / 2; i++) { tx_corr_coeff[i * 2][0] = tx_corr_coeff[(i * 2) + 1][0] = - AR_PHY_TX_IQCAL_CORR_COEFF_B0(i); + AR_PHY_TX_IQCAL_CORR_COEFF_B0(ah, i); if (!AR_SREV_9485(ah)) { tx_corr_coeff[i * 2][1] = tx_corr_coeff[(i * 2) + 1][1] = @@ -914,7 +914,7 @@ static void ar9003_hw_tx_iq_cal_outlier_detection(struct ath_hw *ah, if (!(ah->txchainmask & (1 << i))) continue; nmeasurement = REG_READ_FIELD(ah, - AR_PHY_TX_IQCAL_STATUS_B0, + AR_PHY_TX_IQCAL_STATUS_B0(ah), AR_PHY_CALIBRATED_GAINS_0); if (nmeasurement > MAX_MEASUREMENT) @@ -988,10 +988,10 @@ static bool ar9003_hw_tx_iq_cal_run(struct ath_hw *ah) REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN, AR_PHY_TXGAIN_FORCE, 0); - REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_START, + REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_START(ah), AR_PHY_TX_IQCAL_START_DO_CAL, 1); - if (!ath9k_hw_wait(ah, AR_PHY_TX_IQCAL_START, + if (!ath9k_hw_wait(ah, AR_PHY_TX_IQCAL_START(ah), AR_PHY_TX_IQCAL_START_DO_CAL, 0, AH_WAIT_TIMEOUT)) { ath_dbg(common, CALIBRATE, "Tx IQ Cal is not completed\n"); @@ -1056,7 +1056,7 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, { struct ath_common *common = ath9k_hw_common(ah); const u32 txiqcal_status[AR9300_MAX_CHAINS] = { - AR_PHY_TX_IQCAL_STATUS_B0, + AR_PHY_TX_IQCAL_STATUS_B0(ah), AR_PHY_TX_IQCAL_STATUS_B1, AR_PHY_TX_IQCAL_STATUS_B2, }; @@ -1076,7 +1076,7 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, continue; nmeasurement = REG_READ_FIELD(ah, - AR_PHY_TX_IQCAL_STATUS_B0, + AR_PHY_TX_IQCAL_STATUS_B0(ah), AR_PHY_CALIBRATED_GAINS_0); if (nmeasurement > MAX_MEASUREMENT) nmeasurement = MAX_MEASUREMENT; @@ -1096,7 +1096,7 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, u32 idx = 2 * j, offset = 4 * (3 * im + j); REG_RMW_FIELD(ah, - AR_PHY_CHAN_INFO_MEMORY, + AR_PHY_CHAN_INFO_MEMORY(ah), AR_PHY_CHAN_INFO_TAB_S2_READ, 0); @@ -1106,7 +1106,7 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, offset); REG_RMW_FIELD(ah, - AR_PHY_CHAN_INFO_MEMORY, + AR_PHY_CHAN_INFO_MEMORY(ah), AR_PHY_CHAN_INFO_TAB_S2_READ, 1); @@ -1161,7 +1161,7 @@ static void ar9003_hw_tx_iq_cal_reload(struct ath_hw *ah) memset(tx_corr_coeff, 0, sizeof(tx_corr_coeff)); for (i = 0; i < MAX_MEASUREMENT / 2; i++) { tx_corr_coeff[i * 2][0] = tx_corr_coeff[(i * 2) + 1][0] = - AR_PHY_TX_IQCAL_CORR_COEFF_B0(i); + AR_PHY_TX_IQCAL_CORR_COEFF_B0(ah, i); if (!AR_SREV_9485(ah)) { tx_corr_coeff[i * 2][1] = tx_corr_coeff[(i * 2) + 1][1] = @@ -1346,7 +1346,7 @@ static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable) if (!caldata || !(ah->enabled_cals & TX_CL_CAL)) return; - txclcal_done = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) & + txclcal_done = !!(REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) & AR_PHY_AGC_CONTROL_CLC_SUCCESS); if (test_bit(TXCLCAL_DONE, &caldata->cal_flags)) { @@ -1424,12 +1424,12 @@ static bool ar9003_hw_init_cal_pcoem(struct ath_hw *ah, if (rtt) { if (!run_rtt_cal) { - agc_ctrl = REG_READ(ah, AR_PHY_AGC_CONTROL); + agc_ctrl = REG_READ(ah, AR_PHY_AGC_CONTROL(ah)); agc_supp_cals &= agc_ctrl; agc_ctrl &= ~(AR_PHY_AGC_CONTROL_OFFSET_CAL | AR_PHY_AGC_CONTROL_FLTR_CAL | AR_PHY_AGC_CONTROL_PKDET_CAL); - REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl); + REG_WRITE(ah, AR_PHY_AGC_CONTROL(ah), agc_ctrl); } else { if 
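The ath9k hunks above and below all make the same mechanical change: register-offset macros such as AR_PHY_AGC_CONTROL used to expand to an expression that silently referenced a variable named ah in the caller's scope, and now take the ath_hw pointer explicitly. A reduced before/after sketch of the pattern; the macro name here is invented, with the offsets borrowed from the AR_PHY_SPUR_MASK_A change in ar9003_phy.h further down:

/* before: compiles only where a local named 'ah' happens to exist */
#define AR_EXAMPLE_REG		(AR_SM_BASE + (AR_SREV_9561(ah) ? 0x18 : 0x20))

/* after: the dependency on the hardware revision is explicit */
#define AR_EXAMPLE_REG_NEW(_ah)	(AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x18 : 0x20))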
(ah->ah_flags & AH_FASTCC) run_agc_cal = true; @@ -1452,7 +1452,7 @@ static bool ar9003_hw_init_cal_pcoem(struct ath_hw *ah, goto skip_tx_iqcal; /* Do Tx IQ Calibration */ - REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1, + REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1(ah), AR_PHY_TX_IQCAL_CONTROL_1_IQCORR_I_Q_COFF_DELPT, DELPT); @@ -1462,10 +1462,10 @@ static bool ar9003_hw_init_cal_pcoem(struct ath_hw *ah, */ if (ah->enabled_cals & TX_IQ_ON_AGC_CAL) { if (caldata && !test_bit(TXIQCAL_DONE, &caldata->cal_flags)) - REG_SET_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0, + REG_SET_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0(ah), AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL); else - REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0, + REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0(ah), AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL); txiqcal_done = run_agc_cal = true; } @@ -1485,12 +1485,12 @@ skip_tx_iqcal: if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) { /* Calibrate the AGC */ - REG_WRITE(ah, AR_PHY_AGC_CONTROL, - REG_READ(ah, AR_PHY_AGC_CONTROL) | + REG_WRITE(ah, AR_PHY_AGC_CONTROL(ah), + REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) | AR_PHY_AGC_CONTROL_CAL); /* Poll for offset calibration complete */ - status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, + status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT); @@ -1507,7 +1507,7 @@ skip_tx_iqcal: if (rtt && !run_rtt_cal) { agc_ctrl |= agc_supp_cals; - REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl); + REG_WRITE(ah, AR_PHY_AGC_CONTROL(ah), agc_ctrl); } if (!status) { @@ -1558,11 +1558,11 @@ static bool do_ar9003_agc_cal(struct ath_hw *ah) struct ath_common *common = ath9k_hw_common(ah); bool status; - REG_WRITE(ah, AR_PHY_AGC_CONTROL, - REG_READ(ah, AR_PHY_AGC_CONTROL) | + REG_WRITE(ah, AR_PHY_AGC_CONTROL(ah), + REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) | AR_PHY_AGC_CONTROL_CAL); - status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, + status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT); if (!status) { @@ -1596,7 +1596,7 @@ static bool ar9003_hw_init_cal_soc(struct ath_hw *ah, goto skip_tx_iqcal; /* Do Tx IQ Calibration */ - REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1, + REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1(ah), AR_PHY_TX_IQCAL_CONTROL_1_IQCORR_I_Q_COFF_DELPT, DELPT); @@ -1605,7 +1605,7 @@ static bool ar9003_hw_init_cal_soc(struct ath_hw *ah, * AGC calibration. Specifically, AR9550 in SoC chips. 
*/ if (ah->enabled_cals & TX_IQ_ON_AGC_CAL) { - if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0, + if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0(ah), AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL)) { txiqcal_done = true; } else { diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index 16bfcd0a1f6e..944f46cdf34c 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -3084,13 +3084,13 @@ error: static bool ar9300_otp_read_word(struct ath_hw *ah, int addr, u32 *data) { - REG_READ(ah, AR9300_OTP_BASE + (4 * addr)); + REG_READ(ah, AR9300_OTP_BASE(ah) + (4 * addr)); - if (!ath9k_hw_wait(ah, AR9300_OTP_STATUS, AR9300_OTP_STATUS_TYPE, + if (!ath9k_hw_wait(ah, AR9300_OTP_STATUS(ah), AR9300_OTP_STATUS_TYPE, AR9300_OTP_STATUS_VALID, 1000)) return false; - *data = REG_READ(ah, AR9300_OTP_READ_DATA); + *data = REG_READ(ah, AR9300_OTP_READ_DATA(ah)); return true; } @@ -3607,15 +3607,15 @@ static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz) if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah) || AR_SREV_9531(ah) || AR_SREV_9561(ah)) - REG_RMW_FIELD(ah, AR_CH0_TOP2, AR_CH0_TOP2_XPABIASLVL, bias); + REG_RMW_FIELD(ah, AR_CH0_TOP2(ah), AR_CH0_TOP2_XPABIASLVL, bias); else if (AR_SREV_9462(ah) || AR_SREV_9550(ah) || AR_SREV_9565(ah)) - REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias); + REG_RMW_FIELD(ah, AR_CH0_TOP(ah), AR_CH0_TOP_XPABIASLVL, bias); else { - REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias); - REG_RMW_FIELD(ah, AR_CH0_THERM, + REG_RMW_FIELD(ah, AR_CH0_TOP(ah), AR_CH0_TOP_XPABIASLVL, bias); + REG_RMW_FIELD(ah, AR_CH0_THERM(ah), AR_CH0_THERM_XPABIASLVL_MSB, bias >> 2); - REG_RMW_FIELD(ah, AR_CH0_THERM, + REG_RMW_FIELD(ah, AR_CH0_THERM(ah), AR_CH0_THERM_XPASHORT2GND, 1); } } @@ -3960,9 +3960,9 @@ void ar9003_hw_internal_regulator_apply(struct ath_hw *ah) if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) { int reg_pmu_set; - reg_pmu_set = REG_READ(ah, AR_PHY_PMU2) & ~AR_PHY_PMU2_PGM; - REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set); - if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set)) + reg_pmu_set = REG_READ(ah, AR_PHY_PMU2(ah)) & ~AR_PHY_PMU2_PGM; + REG_WRITE(ah, AR_PHY_PMU2(ah), reg_pmu_set); + if (!is_pmu_set(ah, AR_PHY_PMU2(ah), reg_pmu_set)) return; if (AR_SREV_9330(ah)) { @@ -3984,28 +3984,28 @@ void ar9003_hw_internal_regulator_apply(struct ath_hw *ah) (3 << 24) | (1 << 28); } - REG_WRITE(ah, AR_PHY_PMU1, reg_pmu_set); - if (!is_pmu_set(ah, AR_PHY_PMU1, reg_pmu_set)) + REG_WRITE(ah, AR_PHY_PMU1(ah), reg_pmu_set); + if (!is_pmu_set(ah, AR_PHY_PMU1(ah), reg_pmu_set)) return; - reg_pmu_set = (REG_READ(ah, AR_PHY_PMU2) & ~0xFFC00000) + reg_pmu_set = (REG_READ(ah, AR_PHY_PMU2(ah)) & ~0xFFC00000) | (4 << 26); - REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set); - if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set)) + REG_WRITE(ah, AR_PHY_PMU2(ah), reg_pmu_set); + if (!is_pmu_set(ah, AR_PHY_PMU2(ah), reg_pmu_set)) return; - reg_pmu_set = (REG_READ(ah, AR_PHY_PMU2) & ~0x00200000) + reg_pmu_set = (REG_READ(ah, AR_PHY_PMU2(ah)) & ~0x00200000) | (1 << 21); - REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set); - if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set)) + REG_WRITE(ah, AR_PHY_PMU2(ah), reg_pmu_set); + if (!is_pmu_set(ah, AR_PHY_PMU2(ah), reg_pmu_set)) return; } else if (AR_SREV_9462(ah) || AR_SREV_9565(ah) || AR_SREV_9561(ah)) { reg_val = le32_to_cpu(pBase->swreg); - REG_WRITE(ah, AR_PHY_PMU1, reg_val); + REG_WRITE(ah, AR_PHY_PMU1(ah), reg_val); if (AR_SREV_9561(ah)) - 
REG_WRITE(ah, AR_PHY_PMU2, 0x10200000); + REG_WRITE(ah, AR_PHY_PMU2(ah), 0x10200000); } else { /* Internal regulator is ON. Write swreg register. */ reg_val = le32_to_cpu(pBase->swreg); @@ -4021,25 +4021,25 @@ void ar9003_hw_internal_regulator_apply(struct ath_hw *ah) } } else { if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) { - REG_RMW_FIELD(ah, AR_PHY_PMU2, AR_PHY_PMU2_PGM, 0); - while (REG_READ_FIELD(ah, AR_PHY_PMU2, + REG_RMW_FIELD(ah, AR_PHY_PMU2(ah), AR_PHY_PMU2_PGM, 0); + while (REG_READ_FIELD(ah, AR_PHY_PMU2(ah), AR_PHY_PMU2_PGM)) udelay(10); - REG_RMW_FIELD(ah, AR_PHY_PMU1, AR_PHY_PMU1_PWD, 0x1); - while (!REG_READ_FIELD(ah, AR_PHY_PMU1, + REG_RMW_FIELD(ah, AR_PHY_PMU1(ah), AR_PHY_PMU1_PWD, 0x1); + while (!REG_READ_FIELD(ah, AR_PHY_PMU1(ah), AR_PHY_PMU1_PWD)) udelay(10); - REG_RMW_FIELD(ah, AR_PHY_PMU2, AR_PHY_PMU2_PGM, 0x1); - while (!REG_READ_FIELD(ah, AR_PHY_PMU2, + REG_RMW_FIELD(ah, AR_PHY_PMU2(ah), AR_PHY_PMU2_PGM, 0x1); + while (!REG_READ_FIELD(ah, AR_PHY_PMU2(ah), AR_PHY_PMU2_PGM)) udelay(10); } else if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) - REG_RMW_FIELD(ah, AR_PHY_PMU1, AR_PHY_PMU1_PWD, 0x1); + REG_RMW_FIELD(ah, AR_PHY_PMU1(ah), AR_PHY_PMU1_PWD, 0x1); else { - reg_val = REG_READ(ah, AR_RTC_SLEEP_CLK) | + reg_val = REG_READ(ah, AR_RTC_SLEEP_CLK(ah)) | AR_RTC_FORCE_SWREG_PRD; - REG_WRITE(ah, AR_RTC_SLEEP_CLK, reg_val); + REG_WRITE(ah, AR_RTC_SLEEP_CLK(ah), reg_val); } } @@ -4055,9 +4055,9 @@ static void ar9003_hw_apply_tuning_caps(struct ath_hw *ah) if (eep->baseEepHeader.featureEnable & 0x40) { tuning_caps_param &= 0x7f; - REG_RMW_FIELD(ah, AR_CH0_XTAL, AR_CH0_XTAL_CAPINDAC, + REG_RMW_FIELD(ah, AR_CH0_XTAL(ah), AR_CH0_XTAL_CAPINDAC, tuning_caps_param); - REG_RMW_FIELD(ah, AR_CH0_XTAL, AR_CH0_XTAL_CAPOUTDAC, + REG_RMW_FIELD(ah, AR_CH0_XTAL(ah), AR_CH0_XTAL_CAPOUTDAC, tuning_caps_param); } } diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h index f8ae20318302..b91ef1250ba8 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h @@ -82,16 +82,16 @@ /* AR5416_EEPMISC_BIG_ENDIAN not set indicates little endian */ #define AR9300_EEPMISC_LITTLE_ENDIAN 0 -#define AR9300_OTP_BASE \ - ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30000 : 0x14000) -#define AR9300_OTP_STATUS \ - ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x31018 : 0x15f18) +#define AR9300_OTP_BASE(_ah) \ + ((AR_SREV_9340(_ah) || AR_SREV_9550(_ah)) ? 0x30000 : 0x14000) +#define AR9300_OTP_STATUS(_ah) \ + ((AR_SREV_9340(_ah) || AR_SREV_9550(_ah)) ? 0x31018 : 0x15f18) #define AR9300_OTP_STATUS_TYPE 0x7 #define AR9300_OTP_STATUS_VALID 0x4 #define AR9300_OTP_STATUS_ACCESS_BUSY 0x2 #define AR9300_OTP_STATUS_SM_BUSY 0x1 -#define AR9300_OTP_READ_DATA \ - ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x3101c : 0x15f1c) +#define AR9300_OTP_READ_DATA(_ah) \ + ((AR_SREV_9340(_ah) || AR_SREV_9550(_ah)) ? 
0x3101c : 0x15f1c) enum targetPowerHTRates { HT_TARGET_RATE_0_8_16, diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c index 42f00a2a8c80..4f27a9fb1482 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c @@ -1032,8 +1032,8 @@ static void ar9003_hw_configpcipowersave(struct ath_hw *ah, /* Nothing to do on restore for 11N */ if (!power_off /* !restore */) { /* set bit 19 to allow forcing of pcie core into L1 state */ - REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA); - REG_WRITE(ah, AR_WA, ah->WARegVal); + REG_SET_BIT(ah, AR_PCIE_PM_CTRL(ah), AR_PCIE_PM_CTRL_ENA); + REG_WRITE(ah, AR_WA(ah), ah->WARegVal); } /* diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c index ff8ab58e67d9..a8bc003077dc 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c @@ -193,16 +193,16 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked, if (ath9k_hw_mci_is_enabled(ah)) async_mask |= AR_INTR_ASYNC_MASK_MCI; - async_cause = REG_READ(ah, AR_INTR_ASYNC_CAUSE); + async_cause = REG_READ(ah, AR_INTR_ASYNC_CAUSE(ah)); if (async_cause & async_mask) { - if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M) + if ((REG_READ(ah, AR_RTC_STATUS(ah)) & AR_RTC_STATUS_M(ah)) == AR_RTC_STATUS_ON) isr = REG_READ(ah, AR_ISR); } - sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT; + sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE(ah)) & AR_INTR_SYNC_DEFAULT; *masked = 0; @@ -280,7 +280,7 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked, u32 s5; if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) - s5 = REG_READ(ah, AR_ISR_S5_S); + s5 = REG_READ(ah, AR_ISR_S5_S(ah)); else s5 = REG_READ(ah, AR_ISR_S5); @@ -345,8 +345,8 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked, ath_dbg(common, INTERRUPT, "AR_INTR_SYNC_LOCAL_TIMEOUT\n"); - REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause); - (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR); + REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR(ah), sync_cause); + (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR(ah)); } return true; diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c index 8d7efd80d97a..2b9c07961cd7 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c @@ -458,7 +458,7 @@ static void ar9003_mci_observation_set_up(struct ath_hw *ah) } else return; - REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE); + REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL(ah), AR_GPIO_JTAG_DISABLE); REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL, AR_GLB_DS_JTAG_DISABLE, 1); REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL, AR_GLB_WLAN_UART_INTF_EN, 0); @@ -466,12 +466,12 @@ static void ar9003_mci_observation_set_up(struct ath_hw *ah) REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_GPIO_OBS_SEL, 0); REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL, 1); - REG_WRITE(ah, AR_OBS, 0x4b); + REG_WRITE(ah, AR_OBS(ah), 0x4b); REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL1, 0x03); REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL2, 0x01); REG_RMW_FIELD(ah, AR_MACMISC, AR_MACMISC_MISC_OBS_BUS_LSB, 0x02); REG_RMW_FIELD(ah, AR_MACMISC, AR_MACMISC_MISC_OBS_BUS_MSB, 0x03); - REG_RMW_FIELD(ah, AR_PHY_TEST_CTL_STATUS, + REG_RMW_FIELD(ah, AR_PHY_TEST_CTL_STATUS(ah), AR_PHY_TEST_CTL_DEBUGPORT_SEL, 0x07); } diff --git 
a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c index b2d53b6c0ffd..83d993fff695 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c @@ -201,19 +201,19 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah) ar9003_paprd_enable(ah, false); - REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1, + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1(ah), AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP, 0x30); - REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1, + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1(ah), AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_ENABLE, 1); - REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1, + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1(ah), AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_TX_GAIN_FORCE, 1); - REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1, + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1(ah), AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_RX_BB_GAIN_FORCE, 0); - REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1, + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1(ah), AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_IQCORR_ENABLE, 0); - REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1, + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1(ah), AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING, 28); - REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1, + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1(ah), AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE, 1); if (AR_SREV_9485(ah)) { @@ -229,15 +229,15 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah) } } - REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL2, + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL2(ah), AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN, val); - REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3, + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3(ah), AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_FINE_CORR_LEN, 4); - REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3, + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3(ah), AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_COARSE_CORR_LEN, 4); - REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3, + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3(ah), AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES, 7); - REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3, + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3(ah), AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL, 1); if (AR_SREV_9485(ah) || @@ -246,10 +246,10 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah) AR_SREV_9550(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah)) - REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3, + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3(ah), AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP, -3); else - REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3, + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3(ah), AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP, -6); val = -10; @@ -257,16 +257,16 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah) if (IS_CHAN_2GHZ(ah->curchan) && !AR_SREV_9462(ah) && !AR_SREV_9565(ah)) val = -15; - REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3, + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3(ah), AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE, val); - REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3, + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3(ah), AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE, 1); - REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4, + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4(ah), AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA, 0); - REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4, + REG_RMW_FIELD(ah, 
AR_PHY_PAPRD_TRAINER_CNTL4(ah), AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_MIN_CORR, 400); - REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4, + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4(ah), AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES, 100); REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_0_B0, @@ -313,7 +313,7 @@ static unsigned int ar9003_get_desired_gain(struct ath_hw *ah, int chain, int desired_scale, desired_gain = 0; u32 reg_olpc = 0, reg_cl_gain = 0; - REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1, + REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1(ah), AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE); desired_scale = REG_READ_FIELD(ah, AR_PHY_TPC_12, AR_PHY_TPC_12_DESIRED_SCALE_HT40_5); @@ -812,7 +812,7 @@ void ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain) ar9003_tx_force_gain(ah, gain_index); - REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1, + REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1(ah), AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE); } EXPORT_SYMBOL(ar9003_paprd_setup_gain_table); @@ -833,7 +833,7 @@ static bool ar9003_paprd_retrain_pa_in(struct ath_hw *ah, capdiv2g = REG_READ_FIELD(ah, AR_PHY_65NM_CH0_TXRF3, AR_PHY_65NM_CH0_TXRF3_CAPDIV2G); - quick_drop = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3, + quick_drop = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3(ah), AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP); if (quick_drop) @@ -906,7 +906,7 @@ static bool ar9003_paprd_retrain_pa_in(struct ath_hw *ah, REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_TXRF3, AR_PHY_65NM_CH0_TXRF3_CAPDIV2G, capdiv2g); - REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3, + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3(ah), AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP, quick_drop); @@ -932,14 +932,14 @@ int ar9003_paprd_create_curve(struct ath_hw *ah, data_L = &buf[0]; data_U = &buf[48]; - REG_CLR_BIT(ah, AR_PHY_CHAN_INFO_MEMORY, + REG_CLR_BIT(ah, AR_PHY_CHAN_INFO_MEMORY(ah), AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ); reg = AR_PHY_CHAN_INFO_TAB_0; for (i = 0; i < 48; i++) data_L[i] = REG_READ(ah, reg + (i << 2)); - REG_SET_BIT(ah, AR_PHY_CHAN_INFO_MEMORY, + REG_SET_BIT(ah, AR_PHY_CHAN_INFO_MEMORY(ah), AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ); for (i = 0; i < 48; i++) @@ -951,7 +951,7 @@ int ar9003_paprd_create_curve(struct ath_hw *ah, if (ar9003_paprd_retrain_pa_in(ah, caldata, chain)) status = -EINPROGRESS; - REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1, + REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1(ah), AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE); kfree(buf); @@ -977,14 +977,14 @@ bool ar9003_paprd_is_done(struct ath_hw *ah) { int paprd_done, agc2_pwr; - paprd_done = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1, + paprd_done = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1(ah), AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE); if (AR_SREV_9485(ah)) goto exit; if (paprd_done == 0x1) { - agc2_pwr = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1, + agc2_pwr = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1(ah), AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR); ath_dbg(ath9k_hw_common(ah), CALIBRATE, diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 090ff0600c81..a29c11f944a5 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -296,7 +296,7 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah, cck_spur_freq = cck_spur_freq & 0xfffff; - REG_RMW_FIELD(ah, AR_PHY_AGC_CONTROL, + REG_RMW_FIELD(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_YCOK_MAX, 0x7); REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT, 
AR_PHY_CCK_SPUR_MIT_SPUR_RSSI_THR, 0x7f); @@ -314,7 +314,7 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah, } } - REG_RMW_FIELD(ah, AR_PHY_AGC_CONTROL, + REG_RMW_FIELD(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_YCOK_MAX, 0x5); REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT, AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT, 0x0); @@ -352,7 +352,7 @@ static void ar9003_hw_spur_ofdm_clear(struct ath_hw *ah) AR_PHY_TIMING4_ENABLE_CHAN_MASK, 0); REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK, AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A, 0); - REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A, + REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A(ah), AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A, 0); REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK, AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A, 0); @@ -360,7 +360,7 @@ static void ar9003_hw_spur_ofdm_clear(struct ath_hw *ah) AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A, 0); REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK, AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A, 0); - REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A, + REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A(ah), AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0); REG_RMW_FIELD(ah, AR_PHY_SPUR_REG, AR_PHY_SPUR_REG_MASK_RATE_CNTL, 0); @@ -419,7 +419,7 @@ static void ar9003_hw_spur_ofdm(struct ath_hw *ah, AR_PHY_TIMING4_ENABLE_CHAN_MASK, 0x1); REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK, AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A, mask_index); - REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A, + REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A(ah), AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A, mask_index); REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK, AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A, mask_index); @@ -427,7 +427,7 @@ static void ar9003_hw_spur_ofdm(struct ath_hw *ah, AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A, 0xc); REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK, AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A, 0xc); - REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A, + REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A(ah), AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0xa0); REG_RMW_FIELD(ah, AR_PHY_SPUR_REG, AR_PHY_SPUR_REG_MASK_RATE_CNTL, 0xff); @@ -449,7 +449,7 @@ static void ar9003_hw_spur_ofdm_9565(struct ath_hw *ah, mask_index); /* A == B */ - REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_B, + REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_B(ah), AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A, mask_index); @@ -462,7 +462,7 @@ static void ar9003_hw_spur_ofdm_9565(struct ath_hw *ah, AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_B, 0xe); /* A == B */ - REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_B, + REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_B(ah), AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0xa0); } @@ -710,7 +710,7 @@ static void ar9003_hw_override_ini(struct ath_hw *ah) REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE, AR_GLB_SWREG_DISCONT_EN_BT_WLAN); - if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0, + if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0(ah), AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL)) ah->enabled_cals |= TX_IQ_CAL; else @@ -726,11 +726,11 @@ static void ar9003_hw_override_ini(struct ath_hw *ah) if (AR_SREV_9340(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah) || AR_SREV_9561(ah)) { if (ah->is_clk_25mhz) { - REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1); + REG_WRITE(ah, AR_RTC_DERIVED_CLK(ah), 0x17c << 1); REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7); REG_WRITE(ah, AR_SLP32_INC, 0x0001e7ae); } else { - REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1); + REG_WRITE(ah, AR_RTC_DERIVED_CLK(ah), 0x261 << 1); REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400); REG_WRITE(ah, AR_SLP32_INC, 0x0001e800); } @@ -1795,7 +1795,7 @@ static void ar9003_hw_spectral_scan_wait(struct ath_hw *ah) static void ar9003_hw_tx99_start(struct ath_hw *ah, u32 qnum) { - REG_SET_BIT(ah, 
AR_PHY_TEST, PHY_AGC_CLR); + REG_SET_BIT(ah, AR_PHY_TEST(ah), PHY_AGC_CLR); REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS); REG_WRITE(ah, AR_CR, AR_CR_RXD); REG_WRITE(ah, AR_DLCL_IFS(qnum), 0); @@ -1808,7 +1808,7 @@ static void ar9003_hw_tx99_start(struct ath_hw *ah, u32 qnum) static void ar9003_hw_tx99_stop(struct ath_hw *ah) { - REG_CLR_BIT(ah, AR_PHY_TEST, PHY_AGC_CLR); + REG_CLR_BIT(ah, AR_PHY_TEST(ah), PHY_AGC_CLR); REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS); } diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h index ad949eb02f3d..57e2b4c89125 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h @@ -454,8 +454,8 @@ #define AR_PHY_GEN_CTRL (AR_SM_BASE + 0x4) #define AR_PHY_MODE (AR_SM_BASE + 0x8) #define AR_PHY_ACTIVE (AR_SM_BASE + 0xc) -#define AR_PHY_SPUR_MASK_A (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x18 : 0x20)) -#define AR_PHY_SPUR_MASK_B (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x1c : 0x24)) +#define AR_PHY_SPUR_MASK_A(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x18 : 0x20)) +#define AR_PHY_SPUR_MASK_B(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x1c : 0x24)) #define AR_PHY_SPECTRAL_SCAN (AR_SM_BASE + 0x28) #define AR_PHY_RADAR_BW_FILTER (AR_SM_BASE + 0x2c) #define AR_PHY_SEARCH_START_DELAY (AR_SM_BASE + 0x30) @@ -498,7 +498,7 @@ #define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A 0x3FF #define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A_S 0 -#define AR_PHY_TEST (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x15c : 0x160)) +#define AR_PHY_TEST(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x15c : 0x160)) #define AR_PHY_TEST_BBB_OBS_SEL 0x780000 #define AR_PHY_TEST_BBB_OBS_SEL_S 19 @@ -509,7 +509,7 @@ #define AR_PHY_TEST_CHAIN_SEL 0xC0000000 #define AR_PHY_TEST_CHAIN_SEL_S 30 -#define AR_PHY_TEST_CTL_STATUS (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x160 : 0x164)) +#define AR_PHY_TEST_CTL_STATUS(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x160 : 0x164)) #define AR_PHY_TEST_CTL_TSTDAC_EN 0x1 #define AR_PHY_TEST_CTL_TSTDAC_EN_S 0 #define AR_PHY_TEST_CTL_TX_OBS_SEL 0x1C @@ -524,22 +524,22 @@ #define AR_PHY_TEST_CTL_DEBUGPORT_SEL_S 29 -#define AR_PHY_TSTDAC (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x164 : 0x168)) +#define AR_PHY_TSTDAC(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x164 : 0x168)) -#define AR_PHY_CHAN_STATUS (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x168 : 0x16c)) +#define AR_PHY_CHAN_STATUS(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x168 : 0x16c)) -#define AR_PHY_CHAN_INFO_MEMORY (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x16c : 0x170)) +#define AR_PHY_CHAN_INFO_MEMORY(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x16c : 0x170)) #define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ 0x00000008 #define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ_S 3 -#define AR_PHY_CHNINFO_NOISEPWR (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x170 : 0x174)) -#define AR_PHY_CHNINFO_GAINDIFF (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x174 : 0x178)) -#define AR_PHY_CHNINFO_FINETIM (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x178 : 0x17c)) -#define AR_PHY_CHAN_INFO_GAIN_0 (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x17c : 0x180)) -#define AR_PHY_SCRAMBLER_SEED (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x184 : 0x190)) -#define AR_PHY_CCK_TX_CTRL (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x188 : 0x194)) +#define AR_PHY_CHNINFO_NOISEPWR(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x170 : 0x174)) +#define AR_PHY_CHNINFO_GAINDIFF(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x174 : 0x178)) +#define AR_PHY_CHNINFO_FINETIM(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x178 : 0x17c)) +#define AR_PHY_CHAN_INFO_GAIN_0(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 
0x17c : 0x180)) +#define AR_PHY_SCRAMBLER_SEED(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x184 : 0x190)) +#define AR_PHY_CCK_TX_CTRL(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x188 : 0x194)) -#define AR_PHY_HEAVYCLIP_CTL (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x198 : 0x1a4)) +#define AR_PHY_HEAVYCLIP_CTL(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x198 : 0x1a4)) #define AR_PHY_HEAVYCLIP_20 (AR_SM_BASE + 0x1a8) #define AR_PHY_HEAVYCLIP_40 (AR_SM_BASE + 0x1ac) #define AR_PHY_HEAVYCLIP_1 (AR_SM_BASE + 0x19c) @@ -611,16 +611,16 @@ #define AR_PHY_TXGAIN_TABLE (AR_SM_BASE + 0x300) -#define AR_PHY_TX_IQCAL_CONTROL_0 (AR_SM_BASE + (AR_SREV_9485(ah) ? \ +#define AR_PHY_TX_IQCAL_CONTROL_0(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? \ 0x3c4 : 0x444)) -#define AR_PHY_TX_IQCAL_CONTROL_1 (AR_SM_BASE + (AR_SREV_9485(ah) ? \ +#define AR_PHY_TX_IQCAL_CONTROL_1(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? \ 0x3c8 : 0x448)) -#define AR_PHY_TX_IQCAL_START (AR_SM_BASE + (AR_SREV_9485(ah) ? \ +#define AR_PHY_TX_IQCAL_START(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? \ 0x3c4 : 0x440)) -#define AR_PHY_TX_IQCAL_STATUS_B0 (AR_SM_BASE + (AR_SREV_9485(ah) ? \ +#define AR_PHY_TX_IQCAL_STATUS_B0(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? \ 0x3f0 : 0x48c)) -#define AR_PHY_TX_IQCAL_CORR_COEFF_B0(_i) (AR_SM_BASE + \ - (AR_SREV_9485(ah) ? \ +#define AR_PHY_TX_IQCAL_CORR_COEFF_B0(_ah, _i) (AR_SM_BASE + \ + (AR_SREV_9485(_ah) ? \ 0x3d0 : 0x450) + ((_i) << 2)) #define AR_PHY_RTT_CTRL (AR_SM_BASE + 0x380) @@ -684,8 +684,8 @@ #define AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK 0x00000008 #define AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S 3 -#define AR_CH0_TOP (AR_SREV_9300(ah) ? 0x16288 : \ - (((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x1628c : 0x16280))) +#define AR_CH0_TOP(_ah) (AR_SREV_9300(_ah) ? 0x16288 : \ + (((AR_SREV_9462(_ah) || AR_SREV_9565(_ah)) ? 0x1628c : 0x16280))) #define AR_CH0_TOP_XPABIASLVL (AR_SREV_9550(ah) ? 0x3c0 : 0x300) #define AR_CH0_TOP_XPABIASLVL_S (AR_SREV_9550(ah) ? 6 : 8) @@ -705,8 +705,8 @@ #define AR_SWITCH_TABLE_ALL (0xfff) #define AR_SWITCH_TABLE_ALL_S (0) -#define AR_CH0_THERM (AR_SREV_9300(ah) ? 0x16290 :\ - ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16294 : 0x1628c)) +#define AR_CH0_THERM(_ah) (AR_SREV_9300(_ah) ? 0x16290 :\ + ((AR_SREV_9462(_ah) || AR_SREV_9565(_ah)) ? 0x16294 : 0x1628c)) #define AR_CH0_THERM_XPABIASLVL_MSB 0x3 #define AR_CH0_THERM_XPABIASLVL_MSB_S 0 #define AR_CH0_THERM_XPASHORT2GND 0x4 @@ -717,26 +717,26 @@ #define AR_CH0_THERM_SAR_ADC_OUT 0x0000ff00 #define AR_CH0_THERM_SAR_ADC_OUT_S 8 -#define AR_CH0_TOP2 (AR_SREV_9300(ah) ? 0x1628c : \ - (AR_SREV_9462(ah) ? 0x16290 : 0x16284)) +#define AR_CH0_TOP2(_ah) (AR_SREV_9300(_ah) ? 0x1628c : \ + (AR_SREV_9462(_ah) ? 0x16290 : 0x16284)) #define AR_CH0_TOP2_XPABIASLVL (AR_SREV_9561(ah) ? 0x1e00 : 0xf000) #define AR_CH0_TOP2_XPABIASLVL_S (AR_SREV_9561(ah) ? 9 : 12) -#define AR_CH0_XTAL (AR_SREV_9300(ah) ? 0x16294 : \ - ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16298 : \ - (AR_SREV_9561(ah) ? 0x162c0 : 0x16290))) +#define AR_CH0_XTAL(_ah) (AR_SREV_9300(_ah) ? 0x16294 : \ + ((AR_SREV_9462(_ah) || AR_SREV_9565(_ah)) ? 0x16298 : \ + (AR_SREV_9561(_ah) ? 0x162c0 : 0x16290))) #define AR_CH0_XTAL_CAPINDAC 0x7f000000 #define AR_CH0_XTAL_CAPINDAC_S 24 #define AR_CH0_XTAL_CAPOUTDAC 0x00fe0000 #define AR_CH0_XTAL_CAPOUTDAC_S 17 -#define AR_PHY_PMU1 ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16340 : \ - (AR_SREV_9561(ah) ? 0x16cc0 : 0x16c40)) +#define AR_PHY_PMU1(_ah) ((AR_SREV_9462(_ah) || AR_SREV_9565(_ah)) ? 0x16340 : \ + (AR_SREV_9561(_ah) ? 
0x16cc0 : 0x16c40)) #define AR_PHY_PMU1_PWD 0x1 #define AR_PHY_PMU1_PWD_S 0 -#define AR_PHY_PMU2 ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16344 : \ - (AR_SREV_9561(ah) ? 0x16cc4 : 0x16c44)) +#define AR_PHY_PMU2(_ah) ((AR_SREV_9462(_ah) || AR_SREV_9565(_ah)) ? 0x16344 : \ + (AR_SREV_9561(_ah) ? 0x16cc4 : 0x16c44)) #define AR_PHY_PMU2_PGM 0x00200000 #define AR_PHY_PMU2_PGM_S 21 @@ -974,7 +974,7 @@ #define AR_PHY_TPC_5_B1 (AR_SM1_BASE + 0x208) #define AR_PHY_TPC_6_B1 (AR_SM1_BASE + 0x20c) #define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220) -#define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + (AR_SREV_9462_20_OR_LATER(ah) ? \ +#define AR_PHY_PDADC_TAB_1(_ah) (AR_SM1_BASE + (AR_SREV_9462_20_OR_LATER(_ah) ? \ 0x280 : 0x240)) #define AR_PHY_TPC_19_B1 (AR_SM1_BASE + 0x240) #define AR_PHY_TPC_19_B1_ALPHA_THERM 0xff @@ -1152,7 +1152,7 @@ #define AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT 0x0ffe0000 #define AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT_S 17 -#define AR_PHY_PAPRD_TRAINER_CNTL1 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x580 : 0x490)) +#define AR_PHY_PAPRD_TRAINER_CNTL1(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? 0x580 : 0x490)) #define AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE 0x00000001 #define AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE_S 0 @@ -1169,12 +1169,12 @@ #define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP 0x0003f000 #define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP_S 12 -#define AR_PHY_PAPRD_TRAINER_CNTL2 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x584 : 0x494)) +#define AR_PHY_PAPRD_TRAINER_CNTL2(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? 0x584 : 0x494)) #define AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN 0xFFFFFFFF #define AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN_S 0 -#define AR_PHY_PAPRD_TRAINER_CNTL3 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x588 : 0x498)) +#define AR_PHY_PAPRD_TRAINER_CNTL3(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? 0x588 : 0x498)) #define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE 0x0000003f #define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE_S 0 @@ -1191,7 +1191,7 @@ #define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE 0x20000000 #define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE_S 29 -#define AR_PHY_PAPRD_TRAINER_CNTL4 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x58c : 0x49c)) +#define AR_PHY_PAPRD_TRAINER_CNTL4(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? 0x58c : 0x49c)) #define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES 0x03ff0000 #define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES_S 16 @@ -1211,7 +1211,7 @@ #define AR_PHY_PAPRD_PRE_POST_SCALING 0x3FFFF #define AR_PHY_PAPRD_PRE_POST_SCALING_S 0 -#define AR_PHY_PAPRD_TRAINER_STAT1 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x590 : 0x4a0)) +#define AR_PHY_PAPRD_TRAINER_STAT1(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? 0x590 : 0x4a0)) #define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE 0x00000001 #define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE_S 0 @@ -1226,7 +1226,7 @@ #define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR 0x0001fe00 #define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR_S 9 -#define AR_PHY_PAPRD_TRAINER_STAT2 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x594 : 0x4a4)) +#define AR_PHY_PAPRD_TRAINER_STAT2(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? 0x594 : 0x4a4)) #define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_VAL 0x0000ffff #define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_VAL_S 0 @@ -1235,7 +1235,7 @@ #define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_IDX 0x00600000 #define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_IDX_S 21 -#define AR_PHY_PAPRD_TRAINER_STAT3 (AR_SM_BASE + (AR_SREV_9485(ah) ? 
0x598 : 0x4a8)) +#define AR_PHY_PAPRD_TRAINER_STAT3(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? 0x598 : 0x4a8)) #define AR_PHY_PAPRD_TRAINER_STAT3_PAPRD_TRAIN_SAMPLES_CNT 0x000fffff #define AR_PHY_PAPRD_TRAINER_STAT3_PAPRD_TRAIN_SAMPLES_CNT_S 0 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_wow.c b/drivers/net/wireless/ath/ath9k/ar9003_wow.c index bea41df9fbd7..ac32afbf2c97 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_wow.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_wow.c @@ -43,7 +43,7 @@ static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah) /* set rx disable bit */ REG_WRITE(ah, AR_CR, AR_CR_RXD); - if (!ath9k_hw_wait(ah, AR_CR, AR_CR_RXE, 0, AH_WAIT_TIMEOUT)) { + if (!ath9k_hw_wait(ah, AR_CR, AR_CR_RXE(ah), 0, AH_WAIT_TIMEOUT)) { ath_err(common, "Failed to stop Rx DMA in 10ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n", REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW)); return; @@ -61,7 +61,7 @@ static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah) if (ath9k_hw_mci_is_enabled(ah)) REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2); - REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT); + REG_WRITE(ah, AR_RTC_FORCE_WAKE(ah), AR_RTC_FORCE_WAKE_ON_INT); } static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah) @@ -226,7 +226,7 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah) */ /* do we need to check the bit value 0x01000000 (7-10) ?? */ - REG_RMW(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_WOW_PME_CLR, + REG_RMW(ah, AR_PCIE_PM_CTRL(ah), AR_PMCTRL_WOW_PME_CLR, AR_PMCTRL_PWR_STATE_D1D3); /* @@ -278,12 +278,12 @@ static void ath9k_hw_wow_set_arwr_reg(struct ath_hw *ah) * to the external PCI-E reset. We also need to tie * the PCI-E Phy reset to the PCI-E reset. */ - wa_reg = REG_READ(ah, AR_WA); + wa_reg = REG_READ(ah, AR_WA(ah)); wa_reg &= ~AR_WA_UNTIE_RESET_EN; wa_reg |= AR_WA_RESET_EN; wa_reg |= AR_WA_POR_SHORT; - REG_WRITE(ah, AR_WA, wa_reg); + REG_WRITE(ah, AR_WA(ah), wa_reg); } void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable) @@ -309,11 +309,11 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable) * Set and clear WOW_PME_CLEAR for the chip * to generate next wow signal. */ - REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_HOST_PME_EN | + REG_SET_BIT(ah, AR_PCIE_PM_CTRL(ah), AR_PMCTRL_HOST_PME_EN | AR_PMCTRL_PWR_PM_CTRL_ENA | AR_PMCTRL_AUX_PWR_DET | AR_PMCTRL_WOW_PME_CLR); - REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_WOW_PME_CLR); + REG_CLR_BIT(ah, AR_PCIE_PM_CTRL(ah), AR_PMCTRL_WOW_PME_CLR); /* * Random Backoff. @@ -414,7 +414,7 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable) /* * Set the power states appropriately and enable PME. */ - host_pm_ctrl = REG_READ(ah, AR_PCIE_PM_CTRL); + host_pm_ctrl = REG_READ(ah, AR_PCIE_PM_CTRL(ah)); host_pm_ctrl |= AR_PMCTRL_PWR_STATE_D1D3 | AR_PMCTRL_HOST_PME_EN | AR_PMCTRL_PWR_PM_CTRL_ENA; @@ -430,7 +430,7 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable) host_pm_ctrl |= AR_PMCTRL_PWR_STATE_D1D3_REAL; } - REG_WRITE(ah, AR_PCIE_PM_CTRL, host_pm_ctrl); + REG_WRITE(ah, AR_PCIE_PM_CTRL(ah), host_pm_ctrl); /* * Enable sequence number generation when asleep. 
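A note on the pattern driving most of the ath9k churn above and below: register macros whose offset depends on the chip revision (AR_WA, AR_PHY_TEST, AR_RTC_*, AR_PHY_AGC_CONTROL, and so on) stop expanding an implicit local variable named `ah` and instead take the hardware pointer as an explicit `_ah` argument, with every call site updated to pass it. The following is a minimal userspace sketch of the before/after shape, using the AR_WA offsets from the reg.h hunk further down; `struct ath_hw` and `REG_READ` here are simplified stand-ins, not the driver's real types or register ops:

/*
 * Minimal userspace sketch of the macro conversion applied throughout
 * this patch. struct ath_hw and REG_READ are simplified stand-ins for
 * the real ath9k types and register accessors; the offsets mirror the
 * AR_WA definition in reg.h below.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ath_hw { bool is_ar9340; };

#define AR_SREV_9340(_ah) ((_ah)->is_ar9340)

/* Old style: only expands correctly where a variable named 'ah' exists. */
#define AR_WA_OLD (AR_SREV_9340(ah) ? 0x40c4 : 0x4004)

/* New style: the hardware pointer is an explicit argument. */
#define AR_WA(_ah) (AR_SREV_9340(_ah) ? 0x40c4 : 0x4004)

static uint32_t REG_READ(struct ath_hw *hw, uint32_t reg)
{
	(void)hw; /* the real driver goes through its reg ops; just echo here */
	return reg;
}

int main(void)
{
	struct ath_hw hw = { .is_ar9340 = true };

	/* Call sites change from REG_READ(ah, AR_WA) to the form below. */
	printf("AR_WA resolves to 0x%04" PRIx32 "\n",
	       REG_READ(&hw, AR_WA(&hw)));
	return 0;
}

The point of the conversion is that the headers no longer assume a variable spelled exactly `ah` at each expansion site, which is what lets these revision-dependent offsets be resolved safely in any function, whatever the pointer is called there.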
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c index 618c9df35fc1..9b393a8f7c3a 100644 --- a/drivers/net/wireless/ath/ath9k/btcoex.c +++ b/drivers/net/wireless/ath/ath9k/btcoex.c @@ -173,16 +173,16 @@ void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah) struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; /* connect bt_active to baseband */ - REG_CLR_BIT(ah, AR_GPIO_INPUT_EN_VAL, + REG_CLR_BIT(ah, AR_GPIO_INPUT_EN_VAL(ah), (AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF | AR_GPIO_INPUT_EN_VAL_BT_FREQUENCY_DEF)); - REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, + REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL(ah), AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB); /* Set input mux for bt_active to gpio pin */ if (!AR_SREV_SOC(ah)) - REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1, + REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1(ah), AR_GPIO_INPUT_MUX1_BT_ACTIVE, btcoex_hw->btactive_gpio); @@ -197,17 +197,17 @@ void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah) struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; /* btcoex 3-wire */ - REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, + REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL(ah), (AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB | AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB)); /* Set input mux for bt_prority_async and * bt_active_async to GPIO pins */ if (!AR_SREV_SOC(ah)) { - REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1, + REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1(ah), AR_GPIO_INPUT_MUX1_BT_ACTIVE, btcoex_hw->btactive_gpio); - REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1, + REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1(ah), AR_GPIO_INPUT_MUX1_BT_PRIORITY, btcoex_hw->btpriority_gpio); } @@ -404,7 +404,7 @@ void ath9k_hw_btcoex_enable(struct ath_hw *ah) if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI && !AR_SREV_SOC(ah)) { - REG_RMW(ah, AR_GPIO_PDPU, + REG_RMW(ah, AR_GPIO_PDPU(ah), (0x2 << (btcoex_hw->btactive_gpio * 2)), (0x3 << (btcoex_hw->btactive_gpio * 2))); } diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c index 0422a33395b7..fb270df75eb2 100644 --- a/drivers/net/wireless/ath/ath9k/calib.c +++ b/drivers/net/wireless/ath/ath9k/calib.c @@ -231,17 +231,17 @@ void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update) if (ah->caldata) set_bit(NFCAL_PENDING, &ah->caldata->cal_flags); - REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_ENABLE_NF); if (update) - REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, + REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_NO_UPDATE_NF); else - REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_NO_UPDATE_NF); - REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF); + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_NF); } int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) @@ -251,7 +251,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask; struct ath_common *common = ath9k_hw_common(ah); s16 default_nf = ath9k_hw_get_nf_limits(ah, chan)->nominal; - u32 bb_agc_ctl = REG_READ(ah, AR_PHY_AGC_CONTROL); + u32 bb_agc_ctl = REG_READ(ah, AR_PHY_AGC_CONTROL(ah)); if (ah->caldata) h = ah->caldata->nfCalHist; @@ -286,7 +286,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) * (or after end rx/tx frame if ongoing) */ if (bb_agc_ctl & AR_PHY_AGC_CONTROL_NF) { - REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF); + REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_NF); REG_RMW_BUFFER_FLUSH(ah); ENABLE_REG_RMW_BUFFER(ah); } @@ 
-295,11 +295,11 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) * Load software filtered NF value into baseband internal minCCApwr * variable. */ - REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, + REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_ENABLE_NF); - REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, + REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_NO_UPDATE_NF); - REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF); + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_NF); REG_RMW_BUFFER_FLUSH(ah); /* @@ -309,7 +309,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) * (11n max length 22.1 msec) */ for (j = 0; j < 22200; j++) { - if ((REG_READ(ah, AR_PHY_AGC_CONTROL) & + if ((REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) & AR_PHY_AGC_CONTROL_NF) == 0) break; udelay(10); @@ -321,12 +321,12 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) if (bb_agc_ctl & AR_PHY_AGC_CONTROL_NF) { ENABLE_REG_RMW_BUFFER(ah); if (bb_agc_ctl & AR_PHY_AGC_CONTROL_ENABLE_NF) - REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_ENABLE_NF); if (bb_agc_ctl & AR_PHY_AGC_CONTROL_NO_UPDATE_NF) - REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_NO_UPDATE_NF); - REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF); + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_NF); REG_RMW_BUFFER_FLUSH(ah); } @@ -342,7 +342,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) if (j == 22200) { ath_dbg(common, ANY, "Timeout while waiting for nf to load: AR_PHY_AGC_CONTROL=0x%x\n", - REG_READ(ah, AR_PHY_AGC_CONTROL)); + REG_READ(ah, AR_PHY_AGC_CONTROL(ah))); return -ETIMEDOUT; } @@ -410,7 +410,7 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan) struct ieee80211_channel *c = chan->chan; struct ath9k_hw_cal_data *caldata = ah->caldata; - if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) { + if (REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) & AR_PHY_AGC_CONTROL_NF) { ath_dbg(common, CALIBRATE, "NF did not complete in calibration window\n"); return false; @@ -478,7 +478,7 @@ void ath9k_hw_bstuck_nfcal(struct ath_hw *ah) */ if (!test_bit(NFCAL_PENDING, &caldata->cal_flags)) ath9k_hw_start_nfcal(ah, true); - else if (!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF)) + else if (!(REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) & AR_PHY_AGC_CONTROL_NF)) ath9k_hw_getnf(ah, ah->curchan); set_bit(NFCAL_INTF, &caldata->cal_flags); diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h index 31390af6c33e..f1cde43fcb55 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.h +++ b/drivers/net/wireless/ath/ath9k/eeprom.h @@ -68,8 +68,8 @@ #define AR5416_EEPROM_OFFSET 0x2000 #define AR5416_EEPROM_MAX 0xae0 -#define AR5416_EEPROM_START_ADDR \ - (AR_SREV_9100(ah)) ? 0x1fff1000 : 0x503f1200 +#define AR5416_EEPROM_START_ADDR(_ah) \ + (AR_SREV_9100(_ah)) ? 0x1fff1000 : 0x503f1200 #define SD_NO_CTL 0xE0 #define NO_CTL 0xff @@ -110,10 +110,10 @@ #define FBIN2FREQ(x, y) ((y) ? 
(2300 + x) : (4800 + 5 * x)) #define ath9k_hw_use_flash(_ah) (!(_ah->ah_flags & AH_USE_EEPROM)) -#define OLC_FOR_AR9280_20_LATER (AR_SREV_9280_20_OR_LATER(ah) && \ - ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) -#define OLC_FOR_AR9287_10_LATER (AR_SREV_9287_11_OR_LATER(ah) && \ - ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) +#define OLC_FOR_AR9280_20_LATER(_ah) (AR_SREV_9280_20_OR_LATER(_ah) && \ + _ah->eep_ops->get_eeprom(_ah, EEP_OL_PWRCTRL)) +#define OLC_FOR_AR9287_10_LATER(_ah) (AR_SREV_9287_11_OR_LATER(_ah) && \ + _ah->eep_ops->get_eeprom(_ah, EEP_OL_PWRCTRL)) #define EEP_RFSILENT_ENABLED 0x0001 #define EEP_RFSILENT_ENABLED_S 0 diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c index 9729a69d3e2e..7685f8ab371e 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_def.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c @@ -800,7 +800,7 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah, numPiers = AR5416_NUM_5G_CAL_PIERS; } - if (OLC_FOR_AR9280_20_LATER && IS_CHAN_2GHZ(chan)) { + if (OLC_FOR_AR9280_20_LATER(ah) && IS_CHAN_2GHZ(chan)) { pRawDataset = pEepData->calPierData2G[0]; ah->initPDADC = ((struct calDataPerFreqOpLoop *) pRawDataset)->vpdPdg[0][0]; @@ -841,7 +841,7 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah, pRawDataset = pEepData->calPierData5G[i]; - if (OLC_FOR_AR9280_20_LATER) { + if (OLC_FOR_AR9280_20_LATER(ah)) { u8 pcdacIdx; u8 txPower; @@ -869,7 +869,7 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah, ENABLE_REGWRITE_BUFFER(ah); - if (OLC_FOR_AR9280_20_LATER) { + if (OLC_FOR_AR9280_20_LATER(ah)) { REG_WRITE(ah, AR_PHY_TPCRG5 + regChainOffset, SM(0x6, @@ -1203,7 +1203,7 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah, | ATH9K_POW_SM(ratesArray[rate24mb], 0)); if (IS_CHAN_2GHZ(chan)) { - if (OLC_FOR_AR9280_20_LATER) { + if (OLC_FOR_AR9280_20_LATER(ah)) { cck_ofdm_delta = 2; REG_WRITE(ah, AR_PHY_POWER_TX_RATE3, ATH9K_POW_SM(RT_AR_DELTA(rate2s), 24) @@ -1259,7 +1259,7 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah, ht40PowerIncForPdadc, 8) | ATH9K_POW_SM(ratesArray[rateHt40_4] + ht40PowerIncForPdadc, 0)); - if (OLC_FOR_AR9280_20_LATER) { + if (OLC_FOR_AR9280_20_LATER(ah)) { REG_WRITE(ah, AR_PHY_POWER_TX_RATE9, ATH9K_POW_SM(ratesArray[rateExtOfdm], 24) | ATH9K_POW_SM(RT_AR_DELTA(rateExtCck), 16) diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 1a2e0c7eeb02..f521dfa2f194 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -561,11 +561,11 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev, memcpy(ptr, skb->data, rx_remain_len); rx_pkt_len += rx_remain_len; - hif_dev->rx_remain_len = 0; skb_put(remain_skb, rx_pkt_len); skb_pool[pool_index++] = remain_skb; - + hif_dev->remain_skb = NULL; + hif_dev->rx_remain_len = 0; } else { index = rx_remain_len; } @@ -584,16 +584,21 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev, pkt_len = get_unaligned_le16(ptr + index); pkt_tag = get_unaligned_le16(ptr + index + 2); + /* It is supposed that if we have an invalid pkt_tag or + * pkt_len then the whole input SKB is considered invalid + * and dropped; the associated packets already in skb_pool + * are dropped, too. 
+ */ if (pkt_tag != ATH_USB_RX_STREAM_MODE_TAG) { RX_STAT_INC(hif_dev, skb_dropped); - return; + goto invalid_pkt; } if (pkt_len > 2 * MAX_RX_BUF_SIZE) { dev_err(&hif_dev->udev->dev, "ath9k_htc: invalid pkt_len (%x)\n", pkt_len); RX_STAT_INC(hif_dev, skb_dropped); - return; + goto invalid_pkt; } pad_len = 4 - (pkt_len & 0x3); @@ -605,11 +610,6 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev, if (index > MAX_RX_BUF_SIZE) { spin_lock(&hif_dev->rx_lock); - hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE; - hif_dev->rx_transfer_len = - MAX_RX_BUF_SIZE - chk_idx - 4; - hif_dev->rx_pad_len = pad_len; - nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC); if (!nskb) { dev_err(&hif_dev->udev->dev, @@ -617,6 +617,12 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev, spin_unlock(&hif_dev->rx_lock); goto err; } + + hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE; + hif_dev->rx_transfer_len = + MAX_RX_BUF_SIZE - chk_idx - 4; + hif_dev->rx_pad_len = pad_len; + skb_reserve(nskb, 32); RX_STAT_INC(hif_dev, skb_allocated); @@ -654,6 +660,13 @@ err: skb_pool[i]->len, USB_WLAN_RX_PIPE); RX_STAT_INC(hif_dev, skb_completed); } + return; +invalid_pkt: + for (i = 0; i < pool_index; i++) { + dev_kfree_skb_any(skb_pool[i]); + RX_STAT_INC(hif_dev, skb_dropped); + } + return; } static void ath9k_hif_usb_rx_cb(struct urb *urb) @@ -1411,8 +1424,6 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface) if (hif_dev->flags & HIF_USB_READY) { ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged); - ath9k_hif_usb_dev_deinit(hif_dev); - ath9k_destroy_wmi(hif_dev->htc_handle->drv_priv); ath9k_htc_hw_free(hif_dev->htc_handle); } diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 07ac88fb1c57..dae3d9c7b640 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -523,13 +523,13 @@ static bool ath_usb_eeprom_read(struct ath_common *common, u32 off, u16 *data) (void)REG_READ(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S)); if (!ath9k_hw_wait(ah, - AR_EEPROM_STATUS_DATA, + AR_EEPROM_STATUS_DATA(ah), AR_EEPROM_STATUS_DATA_BUSY | AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0, AH_WAIT_TIMEOUT)) return false; - *data = MS(REG_READ(ah, AR_EEPROM_STATUS_DATA), + *data = MS(REG_READ(ah, AR_EEPROM_STATUS_DATA(ah)), AR_EEPROM_STATUS_DATA_VAL); return true; @@ -988,6 +988,8 @@ void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug) ath9k_deinit_device(htc_handle->drv_priv); ath9k_stop_wmi(htc_handle->drv_priv); + ath9k_hif_usb_dealloc_urbs((struct hif_device_usb *)htc_handle->hif_dev); + ath9k_destroy_wmi(htc_handle->drv_priv); ieee80211_free_hw(htc_handle->drv_priv->hw); } } diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c index ca05b07a45e6..fe62ff668f75 100644 --- a/drivers/net/wireless/ath/ath9k/htc_hst.c +++ b/drivers/net/wireless/ath/ath9k/htc_hst.c @@ -391,7 +391,7 @@ static void ath9k_htc_fw_panic_report(struct htc_target *htc_handle, * HTC Messages are handled directly here and the obtained SKB * is freed. * - * Service messages (Data, WMI) passed to the corresponding + * Service messages (Data, WMI) are passed to the corresponding * endpoint RX handlers, which have to free the SKB. 
*/ void ath9k_htc_rx_msg(struct htc_target *htc_handle, @@ -478,6 +478,8 @@ invalid: if (endpoint->ep_callbacks.rx) endpoint->ep_callbacks.rx(endpoint->ep_callbacks.priv, skb, epid); + else + goto invalid; } } diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 172081ffe477..5982e0db45f9 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -266,7 +266,7 @@ static bool ath9k_hw_read_revisions(struct ath_hw *ah) case AR9300_DEVID_AR9330: ah->hw_version.macVersion = AR_SREV_VERSION_9330; if (!ah->get_mac_revision) { - val = REG_READ(ah, AR_SREV); + val = REG_READ(ah, AR_SREV(ah)); ah->hw_version.macRev = MS(val, AR_SREV_REVISION2); } return true; @@ -284,7 +284,7 @@ static bool ath9k_hw_read_revisions(struct ath_hw *ah) return true; } - srev = REG_READ(ah, AR_SREV); + srev = REG_READ(ah, AR_SREV(ah)); if (srev == -1) { ath_err(ath9k_hw_common(ah), @@ -292,7 +292,7 @@ static bool ath9k_hw_read_revisions(struct ath_hw *ah) return false; } - val = srev & AR_SREV_ID; + val = srev & AR_SREV_ID(ah); if (val == 0xFF) { val = srev; @@ -601,12 +601,12 @@ static int __ath9k_hw_init(struct ath_hw *ah) } /* - * Read back AR_WA into a permanent copy and set bits 14 and 17. + * Read back AR_WA(ah) into a permanent copy and set bits 14 and 17. * We need to do this to avoid RMW of this register. We cannot * read the reg when chip is asleep. */ if (AR_SREV_9300_20_OR_LATER(ah)) { - ah->WARegVal = REG_READ(ah, AR_WA); + ah->WARegVal = REG_READ(ah, AR_WA(ah)); ah->WARegVal |= (AR_WA_D3_L1_DISABLE | AR_WA_ASPM_TIMER_BASED_DISABLE); } @@ -618,7 +618,7 @@ static int __ath9k_hw_init(struct ath_hw *ah) if (AR_SREV_9565(ah)) { ah->WARegVal |= AR_WA_BIT22; - REG_WRITE(ah, AR_WA, ah->WARegVal); + REG_WRITE(ah, AR_WA(ah), ah->WARegVal); } ath9k_hw_init_defaults(ah); @@ -814,7 +814,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah, REG_RMW_FIELD(ah, AR_CH0_DDR_DPLL3, AR_CH0_DPLL3_PHASE_SHIFT, 0x1); - REG_WRITE(ah, AR_RTC_PLL_CONTROL, + REG_WRITE(ah, AR_RTC_PLL_CONTROL(ah), pll | AR_RTC_9300_PLL_BYPASS); udelay(1000); @@ -832,7 +832,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah, AR_SREV_9561(ah)) { u32 regval, pll2_divint, pll2_divfrac, refdiv; - REG_WRITE(ah, AR_RTC_PLL_CONTROL, + REG_WRITE(ah, AR_RTC_PLL_CONTROL(ah), pll | AR_RTC_9300_SOC_PLL_BYPASS); udelay(1000); @@ -911,7 +911,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah, if (AR_SREV_9565(ah)) pll |= 0x40000; - REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll); + REG_WRITE(ah, AR_RTC_PLL_CONTROL(ah), pll); if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) || AR_SREV_9550(ah)) @@ -925,7 +925,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah, udelay(RTC_PLL_SETTLE_DELAY); - REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK); + REG_WRITE(ah, AR_RTC_SLEEP_CLK(ah), AR_RTC_FORCE_DERIVED_CLK); } static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah, @@ -977,7 +977,7 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah, REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg); if (ah->msi_enabled) { - ah->msi_reg = REG_READ(ah, AR_PCIE_MSI); + ah->msi_reg = REG_READ(ah, AR_PCIE_MSI(ah)); ah->msi_reg |= AR_PCIE_MSI_HW_DBI_WR_EN; ah->msi_reg &= AR_PCIE_MSI_HW_INT_PENDING_ADDR_MSI_64; REG_WRITE(ah, AR_INTCFG, msi_cfg); @@ -987,18 +987,18 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah, } if (!AR_SREV_9100(ah)) { - REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF); - REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default); - REG_WRITE(ah, AR_INTR_SYNC_MASK, 0); + 
REG_WRITE(ah, AR_INTR_SYNC_CAUSE(ah), 0xFFFFFFFF); + REG_WRITE(ah, AR_INTR_SYNC_ENABLE(ah), sync_default); + REG_WRITE(ah, AR_INTR_SYNC_MASK(ah), 0); } REGWRITE_BUFFER_FLUSH(ah); if (AR_SREV_9300_20_OR_LATER(ah)) { - REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, 0); - REG_WRITE(ah, AR_INTR_PRIO_ASYNC_MASK, 0); - REG_WRITE(ah, AR_INTR_PRIO_SYNC_ENABLE, 0); - REG_WRITE(ah, AR_INTR_PRIO_SYNC_MASK, 0); + REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE(ah), 0); + REG_WRITE(ah, AR_INTR_PRIO_ASYNC_MASK(ah), 0); + REG_WRITE(ah, AR_INTR_PRIO_SYNC_ENABLE(ah), 0); + REG_WRITE(ah, AR_INTR_PRIO_SYNC_MASK(ah), 0); } } @@ -1341,7 +1341,7 @@ static bool ath9k_hw_ar9330_reset_war(struct ath_hw *ah, int type) return false; } - REG_WRITE(ah, AR_RTC_RESET, 1); + REG_WRITE(ah, AR_RTC_RESET(ah), 1); } return true; @@ -1353,26 +1353,26 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type) u32 tmpReg; if (AR_SREV_9100(ah)) { - REG_RMW_FIELD(ah, AR_RTC_DERIVED_CLK, + REG_RMW_FIELD(ah, AR_RTC_DERIVED_CLK(ah), AR_RTC_DERIVED_CLK_PERIOD, 1); - (void)REG_READ(ah, AR_RTC_DERIVED_CLK); + (void)REG_READ(ah, AR_RTC_DERIVED_CLK(ah)); } ENABLE_REGWRITE_BUFFER(ah); if (AR_SREV_9300_20_OR_LATER(ah)) { - REG_WRITE(ah, AR_WA, ah->WARegVal); + REG_WRITE(ah, AR_WA(ah), ah->WARegVal); udelay(10); } - REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN | + REG_WRITE(ah, AR_RTC_FORCE_WAKE(ah), AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT); if (AR_SREV_9100(ah)) { rst_flags = AR_RTC_RC_MAC_WARM | AR_RTC_RC_MAC_COLD | AR_RTC_RC_COLD_RESET | AR_RTC_RC_WARM_RESET; } else { - tmpReg = REG_READ(ah, AR_INTR_SYNC_CAUSE); + tmpReg = REG_READ(ah, AR_INTR_SYNC_CAUSE(ah)); if (AR_SREV_9340(ah)) tmpReg &= AR9340_INTR_SYNC_LOCAL_TIMEOUT; else @@ -1381,7 +1381,7 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type) if (tmpReg) { u32 val; - REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0); + REG_WRITE(ah, AR_INTR_SYNC_ENABLE(ah), 0); val = AR_RC_HOSTIF; if (!AR_SREV_9300_20_OR_LATER(ah)) @@ -1414,7 +1414,7 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type) REG_CLR_BIT(ah, AR_CFG, AR_CFG_HALT_REQ); } - REG_WRITE(ah, AR_RTC_RC, rst_flags); + REG_WRITE(ah, AR_RTC_RC(ah), rst_flags); REGWRITE_BUFFER_FLUSH(ah); @@ -1425,8 +1425,8 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type) else udelay(100); - REG_WRITE(ah, AR_RTC_RC, 0); - if (!ath9k_hw_wait(ah, AR_RTC_RC, AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) { + REG_WRITE(ah, AR_RTC_RC(ah), 0); + if (!ath9k_hw_wait(ah, AR_RTC_RC(ah), AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) { ath_dbg(ath9k_hw_common(ah), RESET, "RTC stuck in MAC reset\n"); return false; } @@ -1445,17 +1445,17 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah) ENABLE_REGWRITE_BUFFER(ah); if (AR_SREV_9300_20_OR_LATER(ah)) { - REG_WRITE(ah, AR_WA, ah->WARegVal); + REG_WRITE(ah, AR_WA(ah), ah->WARegVal); udelay(10); } - REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN | + REG_WRITE(ah, AR_RTC_FORCE_WAKE(ah), AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT); if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah)) REG_WRITE(ah, AR_RC, AR_RC_AHB); - REG_WRITE(ah, AR_RTC_RESET, 0); + REG_WRITE(ah, AR_RTC_RESET(ah), 0); REGWRITE_BUFFER_FLUSH(ah); @@ -1464,11 +1464,11 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah) if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah)) REG_WRITE(ah, AR_RC, 0); - REG_WRITE(ah, AR_RTC_RESET, 1); + REG_WRITE(ah, AR_RTC_RESET(ah), 1); if (!ath9k_hw_wait(ah, - AR_RTC_STATUS, - AR_RTC_STATUS_M, + AR_RTC_STATUS(ah), + AR_RTC_STATUS_M(ah), AR_RTC_STATUS_ON, AH_WAIT_TIMEOUT)) { 
ath_dbg(ath9k_hw_common(ah), RESET, "RTC not waking up\n"); @@ -1483,11 +1483,11 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type) bool ret = false; if (AR_SREV_9300_20_OR_LATER(ah)) { - REG_WRITE(ah, AR_WA, ah->WARegVal); + REG_WRITE(ah, AR_WA(ah), ah->WARegVal); udelay(10); } - REG_WRITE(ah, AR_RTC_FORCE_WAKE, + REG_WRITE(ah, AR_RTC_FORCE_WAKE(ah), AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT); if (!ah->reset_power_on) @@ -1521,7 +1521,7 @@ static bool ath9k_hw_chip_reset(struct ath_hw *ah, else reset_type = ATH9K_RESET_COLD; } else if (ah->chip_fullsleep || REG_READ(ah, AR_Q_TXE) || - (REG_READ(ah, AR_CR) & AR_CR_RXE)) + (REG_READ(ah, AR_CR) & AR_CR_RXE(ah))) reset_type = ATH9K_RESET_COLD; if (!ath9k_hw_set_reset_reg(ah, reset_type)) @@ -1955,7 +1955,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, ath9k_hw_settsf64(ah, tsf + tsf_offset); if (AR_SREV_9280_20_OR_LATER(ah)) - REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE); + REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL(ah), AR_GPIO_JTAG_DISABLE); if (!AR_SREV_9300_20_OR_LATER(ah)) ar9002_hw_enable_async_fifo(ah); @@ -2017,7 +2017,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, ath9k_hw_set_dma(ah); if (!ath9k_hw_mci_is_enabled(ah)) - REG_WRITE(ah, AR_OBS, 8); + REG_WRITE(ah, AR_OBS(ah), 8); ENABLE_REG_RMW_BUFFER(ah); if (ah->config.rx_intr_mitigation) { @@ -2111,7 +2111,7 @@ static void ath9k_set_power_sleep(struct ath_hw *ah) * Clear the RTC force wake bit to allow the * mac to go to sleep. */ - REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN); + REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE(ah), AR_RTC_FORCE_WAKE_EN); if (ath9k_hw_mci_is_enabled(ah)) udelay(100); @@ -2121,13 +2121,13 @@ static void ath9k_set_power_sleep(struct ath_hw *ah) /* Shutdown chip. Active low */ if (!AR_SREV_5416(ah) && !AR_SREV_9271(ah)) { - REG_CLR_BIT(ah, AR_RTC_RESET, AR_RTC_RESET_EN); + REG_CLR_BIT(ah, AR_RTC_RESET(ah), AR_RTC_RESET_EN); udelay(2); } - /* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */ + /* Clear Bit 14 of AR_WA(ah) after putting chip into Full Sleep mode. */ if (AR_SREV_9300_20_OR_LATER(ah)) - REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE); + REG_WRITE(ah, AR_WA(ah), ah->WARegVal & ~AR_WA_D3_L1_DISABLE); } /* @@ -2143,7 +2143,7 @@ static void ath9k_set_power_network_sleep(struct ath_hw *ah) if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { /* Set WakeOnInterrupt bit; clear ForceWake bit */ - REG_WRITE(ah, AR_RTC_FORCE_WAKE, + REG_WRITE(ah, AR_RTC_FORCE_WAKE(ah), AR_RTC_FORCE_WAKE_ON_INT); } else { @@ -2163,15 +2163,15 @@ static void ath9k_set_power_network_sleep(struct ath_hw *ah) * Clear the RTC force wake bit to allow the * mac to go to sleep. */ - REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN); + REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE(ah), AR_RTC_FORCE_WAKE_EN); if (ath9k_hw_mci_is_enabled(ah)) udelay(30); } - /* Clear Bit 14 of AR_WA after putting chip into Net Sleep mode. */ + /* Clear Bit 14 of AR_WA(ah) after putting chip into Net Sleep mode. */ if (AR_SREV_9300_20_OR_LATER(ah)) - REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE); + REG_WRITE(ah, AR_WA(ah), ah->WARegVal & ~AR_WA_D3_L1_DISABLE); } static bool ath9k_hw_set_power_awake(struct ath_hw *ah) @@ -2179,14 +2179,14 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah) u32 val; int i; - /* Set Bits 14 and 17 of AR_WA before powering on the chip. */ + /* Set Bits 14 and 17 of AR_WA(ah) before powering on the chip. 
*/ if (AR_SREV_9300_20_OR_LATER(ah)) { - REG_WRITE(ah, AR_WA, ah->WARegVal); + REG_WRITE(ah, AR_WA(ah), ah->WARegVal); udelay(10); } - if ((REG_READ(ah, AR_RTC_STATUS) & - AR_RTC_STATUS_M) == AR_RTC_STATUS_SHUTDOWN) { + if ((REG_READ(ah, AR_RTC_STATUS(ah)) & + AR_RTC_STATUS_M(ah)) == AR_RTC_STATUS_SHUTDOWN) { if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) { return false; } @@ -2194,10 +2194,10 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah) ath9k_hw_init_pll(ah, NULL); } if (AR_SREV_9100(ah)) - REG_SET_BIT(ah, AR_RTC_RESET, + REG_SET_BIT(ah, AR_RTC_RESET(ah), AR_RTC_RESET_EN); - REG_SET_BIT(ah, AR_RTC_FORCE_WAKE, + REG_SET_BIT(ah, AR_RTC_FORCE_WAKE(ah), AR_RTC_FORCE_WAKE_EN); if (AR_SREV_9100(ah)) mdelay(10); @@ -2205,11 +2205,11 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah) udelay(50); for (i = POWER_UP_TIME / 50; i > 0; i--) { - val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M; + val = REG_READ(ah, AR_RTC_STATUS(ah)) & AR_RTC_STATUS_M(ah); if (val == AR_RTC_STATUS_ON) break; udelay(50); - REG_SET_BIT(ah, AR_RTC_FORCE_WAKE, + REG_SET_BIT(ah, AR_RTC_FORCE_WAKE(ah), AR_RTC_FORCE_WAKE_EN); } if (i == 0) { @@ -2701,16 +2701,16 @@ static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah, u32 gpio, u32 type) u32 gpio_shift, tmp; if (gpio > 11) - addr = AR_GPIO_OUTPUT_MUX3; + addr = AR_GPIO_OUTPUT_MUX3(ah); else if (gpio > 5) - addr = AR_GPIO_OUTPUT_MUX2; + addr = AR_GPIO_OUTPUT_MUX2(ah); else - addr = AR_GPIO_OUTPUT_MUX1; + addr = AR_GPIO_OUTPUT_MUX1(ah); gpio_shift = (gpio % 6) * 5; if (AR_SREV_9280_20_OR_LATER(ah) || - (addr != AR_GPIO_OUTPUT_MUX1)) { + (addr != AR_GPIO_OUTPUT_MUX1(ah))) { REG_RMW(ah, addr, (type << gpio_shift), (0x1f << gpio_shift)); } else { @@ -2754,13 +2754,13 @@ static void ath9k_hw_gpio_cfg_wmac(struct ath_hw *ah, u32 gpio, bool out, AR7010_GPIO_OE_MASK << gpio_shift); } else if (AR_SREV_SOC(ah)) { gpio_set = out ? 1 : 0; - REG_RMW(ah, AR_GPIO_OE_OUT, gpio_set << gpio_shift, + REG_RMW(ah, AR_GPIO_OE_OUT(ah), gpio_set << gpio_shift, gpio_set << gpio_shift); } else { gpio_shift = gpio << 1; gpio_set = out ? AR_GPIO_OE_OUT_DRV_ALL : AR_GPIO_OE_OUT_DRV_NO; - REG_RMW(ah, AR_GPIO_OE_OUT, gpio_set << gpio_shift, + REG_RMW(ah, AR_GPIO_OE_OUT(ah), gpio_set << gpio_shift, AR_GPIO_OE_OUT_DRV << gpio_shift); if (out) @@ -2813,7 +2813,7 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio) u32 val = 0xffffffff; #define MS_REG_READ(x, y) \ - (MS(REG_READ(ah, AR_GPIO_IN_OUT), x##_GPIO_IN_VAL) & BIT(y)) + (MS(REG_READ(ah, AR_GPIO_IN_OUT(ah)), x##_GPIO_IN_VAL) & BIT(y)) WARN_ON(gpio >= ah->caps.num_gpio_pins); @@ -2829,7 +2829,7 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio) else if (AR_DEVID_7010(ah)) val = REG_READ(ah, AR7010_GPIO_IN) & BIT(gpio); else if (AR_SREV_9300_20_OR_LATER(ah)) - val = REG_READ(ah, AR_GPIO_IN) & BIT(gpio); + val = REG_READ(ah, AR_GPIO_IN(ah)) & BIT(gpio); else val = MS_REG_READ(AR, gpio); } else if (BIT(gpio) & ah->caps.gpio_requested) { @@ -2853,7 +2853,7 @@ void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val) if (BIT(gpio) & ah->caps.gpio_mask) { u32 out_addr = AR_DEVID_7010(ah) ? 
- AR7010_GPIO_OUT : AR_GPIO_IN_OUT; + AR7010_GPIO_OUT : AR_GPIO_IN_OUT(ah); REG_RMW(ah, out_addr, val << gpio, BIT(gpio)); } else if (BIT(gpio) & ah->caps.gpio_requested) { diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index 58d02c19b6d0..b070403e083f 100644 --- a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c @@ -707,7 +707,7 @@ bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset) /* Wait for rx enable bit to go low */ for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) { - if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0) + if ((REG_READ(ah, AR_CR) & AR_CR_RXE(ah)) == 0) break; if (!AR_SREV_9300_20_OR_LATER(ah)) { @@ -762,14 +762,14 @@ bool ath9k_hw_intrpend(struct ath_hw *ah) if (AR_SREV_9100(ah)) return true; - host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE); + host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE(ah)); if (((host_isr & AR_INTR_MAC_IRQ) || (host_isr & AR_INTR_ASYNC_MASK_MCI)) && (host_isr != AR_INTR_SPURIOUS)) return true; - host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE); + host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE(ah)); if ((host_isr & AR_INTR_SYNC_DEFAULT) && (host_isr != AR_INTR_SPURIOUS)) return true; @@ -786,11 +786,11 @@ void ath9k_hw_kill_interrupts(struct ath_hw *ah) REG_WRITE(ah, AR_IER, AR_IER_DISABLE); (void) REG_READ(ah, AR_IER); if (!AR_SREV_9100(ah)) { - REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0); - (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE); + REG_WRITE(ah, AR_INTR_ASYNC_ENABLE(ah), 0); + (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE(ah)); - REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0); - (void) REG_READ(ah, AR_INTR_SYNC_ENABLE); + REG_WRITE(ah, AR_INTR_SYNC_ENABLE(ah), 0); + (void) REG_READ(ah, AR_INTR_SYNC_ENABLE(ah)); } } EXPORT_SYMBOL(ath9k_hw_kill_interrupts); @@ -824,11 +824,11 @@ static void __ath9k_hw_enable_interrupts(struct ath_hw *ah) ath_dbg(common, INTERRUPT, "enable IER\n"); REG_WRITE(ah, AR_IER, AR_IER_ENABLE); if (!AR_SREV_9100(ah)) { - REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, async_mask); - REG_WRITE(ah, AR_INTR_ASYNC_MASK, async_mask); + REG_WRITE(ah, AR_INTR_ASYNC_ENABLE(ah), async_mask); + REG_WRITE(ah, AR_INTR_ASYNC_MASK(ah), async_mask); - REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default); - REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default); + REG_WRITE(ah, AR_INTR_SYNC_ENABLE(ah), sync_default); + REG_WRITE(ah, AR_INTR_SYNC_MASK(ah), sync_default); } ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n", REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER)); @@ -841,26 +841,26 @@ static void __ath9k_hw_enable_interrupts(struct ath_hw *ah) ath_dbg(ath9k_hw_common(ah), INTERRUPT, "Enabling MSI, msi_mask=0x%X\n", ah->msi_mask); - REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, ah->msi_mask); - REG_WRITE(ah, AR_INTR_PRIO_ASYNC_MASK, ah->msi_mask); + REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE(ah), ah->msi_mask); + REG_WRITE(ah, AR_INTR_PRIO_ASYNC_MASK(ah), ah->msi_mask); ath_dbg(ath9k_hw_common(ah), INTERRUPT, "AR_INTR_PRIO_ASYNC_ENABLE=0x%X, AR_INTR_PRIO_ASYNC_MASK=0x%X\n", - REG_READ(ah, AR_INTR_PRIO_ASYNC_ENABLE), - REG_READ(ah, AR_INTR_PRIO_ASYNC_MASK)); + REG_READ(ah, AR_INTR_PRIO_ASYNC_ENABLE(ah)), + REG_READ(ah, AR_INTR_PRIO_ASYNC_MASK(ah))); if (ah->msi_reg == 0) - ah->msi_reg = REG_READ(ah, AR_PCIE_MSI); + ah->msi_reg = REG_READ(ah, AR_PCIE_MSI(ah)); ath_dbg(ath9k_hw_common(ah), INTERRUPT, "AR_PCIE_MSI=0x%X, ah->msi_reg = 0x%X\n", - AR_PCIE_MSI, ah->msi_reg); + AR_PCIE_MSI(ah), ah->msi_reg); i = 0; do { - REG_WRITE(ah, AR_PCIE_MSI, + REG_WRITE(ah, AR_PCIE_MSI(ah), (ah->msi_reg | AR_PCIE_MSI_ENABLE) & 
msi_pend_addr_mask); - _msi_reg = REG_READ(ah, AR_PCIE_MSI); + _msi_reg = REG_READ(ah, AR_PCIE_MSI(ah)); i++; } while ((_msi_reg & AR_PCIE_MSI_ENABLE) == 0 && i < 200); @@ -918,8 +918,8 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah) if (ah->msi_enabled) { ath_dbg(common, INTERRUPT, "Clearing AR_INTR_PRIO_ASYNC_ENABLE\n"); - REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, 0); - REG_READ(ah, AR_INTR_PRIO_ASYNC_ENABLE); + REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE(ah), 0); + REG_READ(ah, AR_INTR_PRIO_ASYNC_ENABLE(ah)); } ath_dbg(common, INTERRUPT, "New interrupt mask 0x%x\n", ints); diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index a074e23013c5..a09f9d223f3d 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -804,14 +804,14 @@ static bool ath_pci_eeprom_read(struct ath_common *common, u32 off, u16 *data) common->ops->read(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S)); if (!ath9k_hw_wait(ah, - AR_EEPROM_STATUS_DATA, + AR_EEPROM_STATUS_DATA(ah), AR_EEPROM_STATUS_DATA_BUSY | AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0, AH_WAIT_TIMEOUT)) { return false; } - *data = MS(common->ops->read(ah, AR_EEPROM_STATUS_DATA), + *data = MS(common->ops->read(ah, AR_EEPROM_STATUS_DATA(ah)), AR_EEPROM_STATUS_DATA_VAL); return true; diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h index 8983ea6fc727..9f5b8a538071 100644 --- a/drivers/net/wireless/ath/ath9k/reg.h +++ b/drivers/net/wireless/ath/ath9k/reg.h @@ -20,7 +20,7 @@ #include "../reg.h" #define AR_CR 0x0008 -#define AR_CR_RXE (AR_SREV_9300_20_OR_LATER(ah) ? 0x0000000c : 0x00000004) +#define AR_CR_RXE(_ah) (AR_SREV_9300_20_OR_LATER(_ah) ? 0x0000000c : 0x00000004) #define AR_CR_RXD 0x00000020 #define AR_CR_SWI 0x00000040 @@ -352,10 +352,10 @@ #define AR_ISR_S1_QCU_TXEOL 0x03FF0000 #define AR_ISR_S1_QCU_TXEOL_S 16 -#define AR_ISR_S2_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00d0 : 0x00cc) -#define AR_ISR_S3_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00d4 : 0x00d0) -#define AR_ISR_S4_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00d8 : 0x00d4) -#define AR_ISR_S5_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00dc : 0x00d8) +#define AR_ISR_S2_S(_ah) (AR_SREV_9300_20_OR_LATER(_ah) ? 0x00d0 : 0x00cc) +#define AR_ISR_S3_S(_ah) (AR_SREV_9300_20_OR_LATER(_ah) ? 0x00d4 : 0x00d0) +#define AR_ISR_S4_S(_ah) (AR_SREV_9300_20_OR_LATER(_ah) ? 0x00d8 : 0x00d4) +#define AR_ISR_S5_S(_ah) (AR_SREV_9300_20_OR_LATER(_ah) ? 0x00dc : 0x00d8) #define AR_DMADBG_0 0x00e0 #define AR_DMADBG_1 0x00e4 #define AR_DMADBG_2 0x00e8 @@ -699,7 +699,7 @@ #define AR_RC_APB 0x00000002 #define AR_RC_HOSTIF 0x00000100 -#define AR_WA (AR_SREV_9340(ah) ? 0x40c4 : 0x4004) +#define AR_WA(_ah) (AR_SREV_9340(_ah) ? 0x40c4 : 0x4004) #define AR_WA_BIT6 (1 << 6) #define AR_WA_BIT7 (1 << 7) #define AR_WA_BIT23 (1 << 23) @@ -721,7 +721,7 @@ #define AR_PM_STATE 0x4008 #define AR_PM_STATE_PME_D3COLD_VAUX 0x00100000 -#define AR_HOST_TIMEOUT (AR_SREV_9340(ah) ? 0x4008 : 0x4018) +#define AR_HOST_TIMEOUT(_ah) (AR_SREV_9340(_ah) ? 0x4008 : 0x4018) #define AR_HOST_TIMEOUT_APB_CNTR 0x0000FFFF #define AR_HOST_TIMEOUT_APB_CNTR_S 0 #define AR_HOST_TIMEOUT_LCL_CNTR 0xFFFF0000 @@ -750,12 +750,12 @@ #define EEPROM_PROTECT_RP_1024_2047 0x4000 #define EEPROM_PROTECT_WP_1024_2047 0x8000 -#define AR_SREV \ - ((AR_SREV_9100(ah)) ? 0x0600 : (AR_SREV_9340(ah) \ +#define AR_SREV(_ah) \ + ((AR_SREV_9100(_ah)) ? 0x0600 : (AR_SREV_9340(_ah) \ ? 0x400c : 0x4020)) -#define AR_SREV_ID \ - ((AR_SREV_9100(ah)) ? 
0x00000FFF : 0x000000FF) +#define AR_SREV_ID(_ah) \ + ((AR_SREV_9100(_ah)) ? 0x00000FFF : 0x000000FF) #define AR_SREV_VERSION 0x000000F0 #define AR_SREV_VERSION_S 4 #define AR_SREV_REVISION 0x00000007 @@ -1038,11 +1038,11 @@ enum ath_usb_dev { #define AR_INTR_SPURIOUS 0xFFFFFFFF -#define AR_INTR_SYNC_CAUSE (AR_SREV_9340(ah) ? 0x4010 : 0x4028) -#define AR_INTR_SYNC_CAUSE_CLR (AR_SREV_9340(ah) ? 0x4010 : 0x4028) +#define AR_INTR_SYNC_CAUSE(_ah) (AR_SREV_9340(_ah) ? 0x4010 : 0x4028) +#define AR_INTR_SYNC_CAUSE_CLR(_ah) (AR_SREV_9340(_ah) ? 0x4010 : 0x4028) -#define AR_INTR_SYNC_ENABLE (AR_SREV_9340(ah) ? 0x4014 : 0x402c) +#define AR_INTR_SYNC_ENABLE(_ah) (AR_SREV_9340(_ah) ? 0x4014 : 0x402c) #define AR_INTR_SYNC_ENABLE_GPIO 0xFFFC0000 #define AR_INTR_SYNC_ENABLE_GPIO_S 18 @@ -1084,18 +1084,18 @@ enum { }; -#define AR_INTR_ASYNC_MASK (AR_SREV_9340(ah) ? 0x4018 : 0x4030) +#define AR_INTR_ASYNC_MASK(_ah) (AR_SREV_9340(_ah) ? 0x4018 : 0x4030) #define AR_INTR_ASYNC_MASK_GPIO 0xFFFC0000 #define AR_INTR_ASYNC_MASK_GPIO_S 18 #define AR_INTR_ASYNC_MASK_MCI 0x00000080 #define AR_INTR_ASYNC_MASK_MCI_S 7 -#define AR_INTR_SYNC_MASK (AR_SREV_9340(ah) ? 0x401c : 0x4034) +#define AR_INTR_SYNC_MASK(_ah) (AR_SREV_9340(_ah) ? 0x401c : 0x4034) #define AR_INTR_SYNC_MASK_GPIO 0xFFFC0000 #define AR_INTR_SYNC_MASK_GPIO_S 18 -#define AR_INTR_ASYNC_CAUSE_CLR (AR_SREV_9340(ah) ? 0x4020 : 0x4038) -#define AR_INTR_ASYNC_CAUSE (AR_SREV_9340(ah) ? 0x4020 : 0x4038) +#define AR_INTR_ASYNC_CAUSE_CLR(_ah) (AR_SREV_9340(_ah) ? 0x4020 : 0x4038) +#define AR_INTR_ASYNC_CAUSE(_ah) (AR_SREV_9340(_ah) ? 0x4020 : 0x4038) #define AR_INTR_ASYNC_CAUSE_MCI 0x00000080 #define AR_INTR_ASYNC_USED (AR_INTR_MAC_IRQ | \ AR_INTR_ASYNC_CAUSE_MCI) @@ -1105,13 +1105,13 @@ enum { #define AR_INTR_ASYNC_ENABLE_MCI_S 7 -#define AR_INTR_ASYNC_ENABLE (AR_SREV_9340(ah) ? 0x4024 : 0x403c) +#define AR_INTR_ASYNC_ENABLE(_ah) (AR_SREV_9340(_ah) ? 0x4024 : 0x403c) #define AR_INTR_ASYNC_ENABLE_GPIO 0xFFFC0000 #define AR_INTR_ASYNC_ENABLE_GPIO_S 18 #define AR_PCIE_SERDES 0x4040 #define AR_PCIE_SERDES2 0x4044 -#define AR_PCIE_PM_CTRL (AR_SREV_9340(ah) ? 0x4004 : 0x4014) +#define AR_PCIE_PM_CTRL(_ah) (AR_SREV_9340(_ah) ? 0x4004 : 0x4014) #define AR_PCIE_PM_CTRL_ENA 0x00080000 #define AR_PCIE_PHY_REG3 0x18c08 @@ -1156,7 +1156,7 @@ enum { #define AR9580_GPIO_MASK 0x0000F4FF #define AR7010_GPIO_MASK 0x0000FFFF -#define AR_GPIO_IN_OUT (AR_SREV_9340(ah) ? 0x4028 : 0x4048) +#define AR_GPIO_IN_OUT(_ah) (AR_SREV_9340(_ah) ? 0x4028 : 0x4048) #define AR_GPIO_IN_VAL 0x0FFFC000 #define AR_GPIO_IN_VAL_S 14 #define AR928X_GPIO_IN_VAL 0x000FFC00 @@ -1170,12 +1170,12 @@ enum { #define AR7010_GPIO_IN_VAL 0x0000FFFF #define AR7010_GPIO_IN_VAL_S 0 -#define AR_GPIO_IN (AR_SREV_9340(ah) ? 0x402c : 0x404c) +#define AR_GPIO_IN(_ah) (AR_SREV_9340(_ah) ? 0x402c : 0x404c) #define AR9300_GPIO_IN_VAL 0x0001FFFF #define AR9300_GPIO_IN_VAL_S 0 -#define AR_GPIO_OE_OUT (AR_SREV_9340(ah) ? 0x4030 : \ - (AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c)) +#define AR_GPIO_OE_OUT(_ah) (AR_SREV_9340(_ah) ? 0x4030 : \ + (AR_SREV_9300_20_OR_LATER(_ah) ? 0x4050 : 0x404c)) #define AR_GPIO_OE_OUT_DRV 0x3 #define AR_GPIO_OE_OUT_DRV_NO 0x0 #define AR_GPIO_OE_OUT_DRV_LOW 0x1 @@ -1197,13 +1197,13 @@ enum { #define AR7010_GPIO_INT_MASK 0x52024 #define AR7010_GPIO_FUNCTION 0x52028 -#define AR_GPIO_INTR_POL (AR_SREV_9340(ah) ? 0x4038 : \ - (AR_SREV_9300_20_OR_LATER(ah) ? 0x4058 : 0x4050)) +#define AR_GPIO_INTR_POL(_ah) (AR_SREV_9340(_ah) ? 0x4038 : \ + (AR_SREV_9300_20_OR_LATER(_ah) ? 
0x4058 : 0x4050)) #define AR_GPIO_INTR_POL_VAL 0x0001FFFF #define AR_GPIO_INTR_POL_VAL_S 0 -#define AR_GPIO_INPUT_EN_VAL (AR_SREV_9340(ah) ? 0x403c : \ - (AR_SREV_9300_20_OR_LATER(ah) ? 0x405c : 0x4054)) +#define AR_GPIO_INPUT_EN_VAL(_ah) (AR_SREV_9340(_ah) ? 0x403c : \ + (AR_SREV_9300_20_OR_LATER(_ah) ? 0x405c : 0x4054)) #define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF 0x00000004 #define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_S 2 #define AR_GPIO_INPUT_EN_VAL_BT_FREQUENCY_DEF 0x00000008 @@ -1221,15 +1221,15 @@ enum { #define AR_GPIO_RTC_RESET_OVERRIDE_ENABLE 0x00010000 #define AR_GPIO_JTAG_DISABLE 0x00020000 -#define AR_GPIO_INPUT_MUX1 (AR_SREV_9340(ah) ? 0x4040 : \ - (AR_SREV_9300_20_OR_LATER(ah) ? 0x4060 : 0x4058)) +#define AR_GPIO_INPUT_MUX1(_ah) (AR_SREV_9340(_ah) ? 0x4040 : \ + (AR_SREV_9300_20_OR_LATER(_ah) ? 0x4060 : 0x4058)) #define AR_GPIO_INPUT_MUX1_BT_ACTIVE 0x000f0000 #define AR_GPIO_INPUT_MUX1_BT_ACTIVE_S 16 #define AR_GPIO_INPUT_MUX1_BT_PRIORITY 0x00000f00 #define AR_GPIO_INPUT_MUX1_BT_PRIORITY_S 8 -#define AR_GPIO_INPUT_MUX2 (AR_SREV_9340(ah) ? 0x4044 : \ - (AR_SREV_9300_20_OR_LATER(ah) ? 0x4064 : 0x405c)) +#define AR_GPIO_INPUT_MUX2(_ah) (AR_SREV_9340(_ah) ? 0x4044 : \ + (AR_SREV_9300_20_OR_LATER(_ah) ? 0x4064 : 0x405c)) #define AR_GPIO_INPUT_MUX2_CLK25 0x0000000f #define AR_GPIO_INPUT_MUX2_CLK25_S 0 #define AR_GPIO_INPUT_MUX2_RFSILENT 0x000000f0 @@ -1237,18 +1237,18 @@ enum { #define AR_GPIO_INPUT_MUX2_RTC_RESET 0x00000f00 #define AR_GPIO_INPUT_MUX2_RTC_RESET_S 8 -#define AR_GPIO_OUTPUT_MUX1 (AR_SREV_9340(ah) ? 0x4048 : \ - (AR_SREV_9300_20_OR_LATER(ah) ? 0x4068 : 0x4060)) -#define AR_GPIO_OUTPUT_MUX2 (AR_SREV_9340(ah) ? 0x404c : \ - (AR_SREV_9300_20_OR_LATER(ah) ? 0x406c : 0x4064)) -#define AR_GPIO_OUTPUT_MUX3 (AR_SREV_9340(ah) ? 0x4050 : \ - (AR_SREV_9300_20_OR_LATER(ah) ? 0x4070 : 0x4068)) +#define AR_GPIO_OUTPUT_MUX1(_ah) (AR_SREV_9340(_ah) ? 0x4048 : \ + (AR_SREV_9300_20_OR_LATER(_ah) ? 0x4068 : 0x4060)) +#define AR_GPIO_OUTPUT_MUX2(_ah) (AR_SREV_9340(_ah) ? 0x404c : \ + (AR_SREV_9300_20_OR_LATER(_ah) ? 0x406c : 0x4064)) +#define AR_GPIO_OUTPUT_MUX3(_ah) (AR_SREV_9340(_ah) ? 0x4050 : \ + (AR_SREV_9300_20_OR_LATER(_ah) ? 0x4070 : 0x4068)) -#define AR_INPUT_STATE (AR_SREV_9340(ah) ? 0x4054 : \ - (AR_SREV_9300_20_OR_LATER(ah) ? 0x4074 : 0x406c)) +#define AR_INPUT_STATE(_ah) (AR_SREV_9340(_ah) ? 0x4054 : \ + (AR_SREV_9300_20_OR_LATER(_ah) ? 0x4074 : 0x406c)) -#define AR_EEPROM_STATUS_DATA (AR_SREV_9340(ah) ? 0x40c8 : \ - (AR_SREV_9300_20_OR_LATER(ah) ? 0x4084 : 0x407c)) +#define AR_EEPROM_STATUS_DATA(_ah) (AR_SREV_9340(_ah) ? 0x40c8 : \ + (AR_SREV_9300_20_OR_LATER(_ah) ? 0x4084 : 0x407c)) #define AR_EEPROM_STATUS_DATA_VAL 0x0000ffff #define AR_EEPROM_STATUS_DATA_VAL_S 0 #define AR_EEPROM_STATUS_DATA_BUSY 0x00010000 @@ -1256,13 +1256,13 @@ enum { #define AR_EEPROM_STATUS_DATA_PROT_ACCESS 0x00040000 #define AR_EEPROM_STATUS_DATA_ABSENT_ACCESS 0x00080000 -#define AR_OBS (AR_SREV_9340(ah) ? 0x405c : \ - (AR_SREV_9300_20_OR_LATER(ah) ? 0x4088 : 0x4080)) +#define AR_OBS(_ah) (AR_SREV_9340(_ah) ? 0x405c : \ + (AR_SREV_9300_20_OR_LATER(_ah) ? 0x4088 : 0x4080)) -#define AR_GPIO_PDPU (AR_SREV_9300_20_OR_LATER(ah) ? 0x4090 : 0x4088) +#define AR_GPIO_PDPU(_ah) (AR_SREV_9300_20_OR_LATER(_ah) ? 0x4090 : 0x4088) -#define AR_PCIE_MSI (AR_SREV_9340(ah) ? 0x40d8 : \ - (AR_SREV_9300_20_OR_LATER(ah) ? 0x40a4 : 0x4094)) +#define AR_PCIE_MSI(_ah) (AR_SREV_9340(_ah) ? 0x40d8 : \ + (AR_SREV_9300_20_OR_LATER(_ah) ? 
0x40a4 : 0x4094)) #define AR_PCIE_MSI_ENABLE 0x00000001 #define AR_PCIE_MSI_HW_DBI_WR_EN 0x02000000 #define AR_PCIE_MSI_HW_INT_PENDING_ADDR 0xFFA0C1FF /* bits 8..11: value must be 0x5060 */ @@ -1272,10 +1272,10 @@ enum { #define AR_INTR_PRIO_RXLP 0x00000002 #define AR_INTR_PRIO_RXHP 0x00000004 -#define AR_INTR_PRIO_SYNC_ENABLE (AR_SREV_9340(ah) ? 0x4088 : 0x40c4) -#define AR_INTR_PRIO_ASYNC_MASK (AR_SREV_9340(ah) ? 0x408c : 0x40c8) -#define AR_INTR_PRIO_SYNC_MASK (AR_SREV_9340(ah) ? 0x4090 : 0x40cc) -#define AR_INTR_PRIO_ASYNC_ENABLE (AR_SREV_9340(ah) ? 0x4094 : 0x40d4) +#define AR_INTR_PRIO_SYNC_ENABLE(_ah) (AR_SREV_9340(_ah) ? 0x4088 : 0x40c4) +#define AR_INTR_PRIO_ASYNC_MASK(_ah) (AR_SREV_9340(_ah) ? 0x408c : 0x40c8) +#define AR_INTR_PRIO_SYNC_MASK(_ah) (AR_SREV_9340(_ah) ? 0x4090 : 0x40cc) +#define AR_INTR_PRIO_ASYNC_ENABLE(_ah) (AR_SREV_9340(_ah) ? 0x4094 : 0x40d4) #define AR_ENT_OTP 0x40d8 #define AR_ENT_OTP_CHAIN2_DISABLE 0x00020000 #define AR_ENT_OTP_49GHZ_DISABLE 0x00100000 @@ -1339,8 +1339,8 @@ enum { #define AR_RTC_9160_PLL_CLKSEL_S 14 #define AR_RTC_BASE 0x00020000 -#define AR_RTC_RC \ - ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0000) : 0x7000) +#define AR_RTC_RC(_ah) \ + ((AR_SREV_9100(_ah)) ? (AR_RTC_BASE + 0x0000) : 0x7000) #define AR_RTC_RC_M 0x00000003 #define AR_RTC_RC_MAC_WARM 0x00000001 #define AR_RTC_RC_MAC_COLD 0x00000002 @@ -1357,8 +1357,8 @@ enum { #define AR_RTC_REG_CONTROL1 0x700c #define AR_RTC_REG_CONTROL1_SWREG_PROGRAM 0x00000001 -#define AR_RTC_PLL_CONTROL \ - ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0014) : 0x7014) +#define AR_RTC_PLL_CONTROL(_ah) \ + ((AR_SREV_9100(_ah)) ? (AR_RTC_BASE + 0x0014) : 0x7014) #define AR_RTC_PLL_CONTROL2 0x703c @@ -1378,15 +1378,15 @@ enum { #define PLL4_MEAS_DONE 0x8 #define SQSUM_DVC_MASK 0x007ffff8 -#define AR_RTC_RESET \ - ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0040) : 0x7040) +#define AR_RTC_RESET(_ah) \ + ((AR_SREV_9100(_ah)) ? (AR_RTC_BASE + 0x0040) : 0x7040) #define AR_RTC_RESET_EN (0x00000001) -#define AR_RTC_STATUS \ - ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0044) : 0x7044) +#define AR_RTC_STATUS(_ah) \ + ((AR_SREV_9100(_ah)) ? (AR_RTC_BASE + 0x0044) : 0x7044) -#define AR_RTC_STATUS_M \ - ((AR_SREV_9100(ah)) ? 0x0000003f : 0x0000000f) +#define AR_RTC_STATUS_M(_ah) \ + ((AR_SREV_9100(_ah)) ? 0x0000003f : 0x0000000f) #define AR_RTC_PM_STATUS_M 0x0000000f @@ -1395,32 +1395,32 @@ enum { #define AR_RTC_STATUS_SLEEP 0x00000004 #define AR_RTC_STATUS_WAKEUP 0x00000008 -#define AR_RTC_SLEEP_CLK \ - ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0048) : 0x7048) +#define AR_RTC_SLEEP_CLK(_ah) \ + ((AR_SREV_9100(_ah)) ? (AR_RTC_BASE + 0x0048) : 0x7048) #define AR_RTC_FORCE_DERIVED_CLK 0x2 #define AR_RTC_FORCE_SWREG_PRD 0x00000004 -#define AR_RTC_FORCE_WAKE \ - ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x004c) : 0x704c) +#define AR_RTC_FORCE_WAKE(_ah) \ + ((AR_SREV_9100(_ah)) ? (AR_RTC_BASE + 0x004c) : 0x704c) #define AR_RTC_FORCE_WAKE_EN 0x00000001 #define AR_RTC_FORCE_WAKE_ON_INT 0x00000002 -#define AR_RTC_INTR_CAUSE \ - ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0050) : 0x7050) +#define AR_RTC_INTR_CAUSE(_ah) \ + ((AR_SREV_9100(_ah)) ? (AR_RTC_BASE + 0x0050) : 0x7050) -#define AR_RTC_INTR_ENABLE \ - ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0054) : 0x7054) +#define AR_RTC_INTR_ENABLE(_ah) \ + ((AR_SREV_9100(_ah)) ? (AR_RTC_BASE + 0x0054) : 0x7054) -#define AR_RTC_INTR_MASK \ - ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0058) : 0x7058) +#define AR_RTC_INTR_MASK(_ah) \ + ((AR_SREV_9100(_ah)) ? 
(AR_RTC_BASE + 0x0058) : 0x7058) #define AR_RTC_KEEP_AWAKE 0x7034 /* RTC_DERIVED_* - only for AR9100 */ -#define AR_RTC_DERIVED_CLK \ - (AR_SREV_9100(ah) ? (AR_RTC_BASE + 0x0038) : 0x7038) +#define AR_RTC_DERIVED_CLK(_ah) \ + (AR_SREV_9100(_ah) ? (AR_RTC_BASE + 0x0038) : 0x7038) #define AR_RTC_DERIVED_CLK_PERIOD 0x0000fffe #define AR_RTC_DERIVED_CLK_PERIOD_S 1 @@ -2114,7 +2114,7 @@ enum { #define AR9300_SM_BASE 0xa200 #define AR9002_PHY_AGC_CONTROL 0x9860 #define AR9003_PHY_AGC_CONTROL AR9300_SM_BASE + 0xc4 -#define AR_PHY_AGC_CONTROL (AR_SREV_9300_20_OR_LATER(ah) ? AR9003_PHY_AGC_CONTROL : AR9002_PHY_AGC_CONTROL) +#define AR_PHY_AGC_CONTROL(_ah) (AR_SREV_9300_20_OR_LATER(_ah) ? AR9003_PHY_AGC_CONTROL : AR9002_PHY_AGC_CONTROL) #define AR_PHY_AGC_CONTROL_CAL 0x00000001 /* do internal calibration */ #define AR_PHY_AGC_CONTROL_NF 0x00000002 /* do noise-floor calibration */ #define AR_PHY_AGC_CONTROL_OFFSET_CAL 0x00000800 /* allow offset calibration */ diff --git a/drivers/net/wireless/ath/ath9k/rng.c b/drivers/net/wireless/ath/ath9k/rng.c index 58c0ab01771b..e1def77591c6 100644 --- a/drivers/net/wireless/ath/ath9k/rng.c +++ b/drivers/net/wireless/ath/ath9k/rng.c @@ -29,9 +29,9 @@ static int ath9k_rng_data_read(struct ath_softc *sc, u32 *buf, u32 buf_size) ath9k_ps_wakeup(sc); - REG_RMW_FIELD(ah, AR_PHY_TEST, AR_PHY_TEST_BBB_OBS_SEL, 1); - REG_CLR_BIT(ah, AR_PHY_TEST, AR_PHY_TEST_RX_OBS_SEL_BIT5); - REG_RMW_FIELD(ah, AR_PHY_TEST_CTL_STATUS, AR_PHY_TEST_CTL_RX_OBS_SEL, 0); + REG_RMW_FIELD(ah, AR_PHY_TEST(ah), AR_PHY_TEST_BBB_OBS_SEL, 1); + REG_CLR_BIT(ah, AR_PHY_TEST(ah), AR_PHY_TEST_RX_OBS_SEL_BIT5); + REG_RMW_FIELD(ah, AR_PHY_TEST_CTL_STATUS(ah), AR_PHY_TEST_CTL_RX_OBS_SEL, 0); for (i = 0, j = 0; i < buf_size; i++) { v1 = REG_READ(ah, AR_PHY_TST_ADC) & 0xffff; diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c index f315c54bd3ac..19345b8f7bfd 100644 --- a/drivers/net/wireless/ath/ath9k/wmi.c +++ b/drivers/net/wireless/ath/ath9k/wmi.c @@ -341,6 +341,7 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, if (!time_left) { ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n", wmi_cmd_to_name(cmd_id)); + wmi->last_seq_id = 0; mutex_unlock(&wmi->op_mutex); return -ETIMEDOUT; } diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 39abb59d8771..ef9a8e0b75e6 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1216,7 +1216,7 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf, txpower -= 2 * power_offset; } - if (OLC_FOR_AR9280_20_LATER && is_cck) + if (OLC_FOR_AR9280_20_LATER(ah) && is_cck) txpower -= 2; txpower = max(txpower, 0); |
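One more hunk above is worth a closing illustration. The hif_usb.c RX stream parser used to bail out with a bare `return` on an invalid `pkt_tag` or oversized `pkt_len`, leaking whatever had already been staged in `skb_pool`; the new `invalid_pkt` label frees the staged packets too, per the comment added in that hunk that a bad record invalidates the whole input SKB. Below is a hedged userspace sketch of that cleanup shape; the tag value, the `pkt` type, and the alloc/free helpers are simplified stand-ins for the driver's skb handling, record padding is omitted, and error checks are elided:

/*
 * Userspace sketch of the fixed error path: once records are staged in
 * a pool pending delivery, a malformed record must release everything
 * already staged instead of returning early. STREAM_TAG, struct pkt and
 * the helpers below are stand-ins, not the ath9k skb machinery.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define POOL_SIZE 8
#define STREAM_TAG 0x4e00 /* stand-in for ATH_USB_RX_STREAM_MODE_TAG */

struct pkt { uint8_t *data; size_t len; };

static void free_pkt(struct pkt *p) { free(p->data); free(p); }

/* Parse little-endian [len][tag][payload] records; deliver all or drop all. */
static int rx_stream(const uint8_t *buf, size_t len)
{
	struct pkt *pool[POOL_SIZE];
	size_t pool_index = 0, index = 0, i;

	while (index + 4 <= len && pool_index < POOL_SIZE) {
		uint16_t pkt_len = buf[index] | (buf[index + 1] << 8);
		uint16_t pkt_tag = buf[index + 2] | (buf[index + 3] << 8);

		if (pkt_tag != STREAM_TAG || index + 4 + pkt_len > len)
			goto invalid_pkt; /* was: return, leaking the pool */

		struct pkt *p = malloc(sizeof(*p));
		p->data = malloc(pkt_len);
		p->len = pkt_len;
		memcpy(p->data, buf + index + 4, pkt_len);
		pool[pool_index++] = p;
		index += 4 + pkt_len;
	}

	for (i = 0; i < pool_index; i++) {
		printf("deliver %zu bytes\n", pool[i]->len);
		free_pkt(pool[i]);
	}
	return 0;

invalid_pkt:
	/* the fix: drop every packet already staged, not just the bad record */
	for (i = 0; i < pool_index; i++)
		free_pkt(pool[i]);
	return -1;
}

int main(void)
{
	const uint8_t good[] = { 2, 0, 0x00, 0x4e, 'h', 'i' };
	const uint8_t bad[]  = { 2, 0, 0x00, 0x4e, 'h', 'i',
				 2, 0, 0xff, 0xff, 'x', 'y' };

	printf("good: %d\n", rx_stream(good, sizeof(good)));
	printf("bad:  %d\n", rx_stream(bad, sizeof(bad)));
	return 0;
}

Note the asymmetry retained from the driver: its `err` path (allocation failure) still completes the packets already pooled, as the `skb_completed` accounting in the hunk shows, while the malformed-input path drops them, since a bad tag or length means the rest of the stream cannot be trusted.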