Merge tag 'ath-next-20240222' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath

ath.git patches for v6.9

We now have support for QCA2066, along with several new features in ath12k.

Major changes:

ath12k:

* firmware-2.bin support

* support having multiple identical PCI devices (firmware needs to
  have ATH12K_FW_FEATURE_MULTI_QRTR_ID; see the sketch below)

* QCN9274: support split-PHY devices

* WCN7850: enable Power Save Mode in station mode

* WCN7850: P2P support

ath11k:

* QCA6390 & WCN6855: support 2 concurrent station interfaces

* QCA2066 support
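
As a rough illustration of the multi-device item above (not code from this
series): the fw.h/core.h changes below add a fw_features bitmap parsed from
firmware-2.bin and note that capable firmware lets the host set the QRTR id
via the PCIE_LOCAL_REG_QRTR_NODE_ID register. A driver-side check could then
look roughly like the sketch below, where the node_id value and the
ath12k_pci_write32() helper are assumptions rather than the actual pci.c code:

	/* Illustrative sketch only, not the upstream implementation. */
	static void ath12k_qrtr_node_id_sketch(struct ath12k_base *ab, u32 node_id)
	{
		/* Without this firmware feature every device assumes the same
		 * QRTR node id, so two identical PCI cards would clash.
		 */
		if (!test_bit(ATH12K_FW_FEATURE_MULTI_QRTR_ID, ab->fw.fw_features))
			return;

		/* node_id is assumed to be derived per device, e.g. from the
		 * PCI domain/bus numbers; the register write helper is also
		 * an assumption here.
		 */
		ath12k_pci_write32(ab, PCIE_LOCAL_REG_QRTR_NODE_ID, node_id);
	}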
Committed by Kalle Valo on 2024-02-22 12:41:45 +02:00 (commit 1c33f0ffac).
41 changed files with 2334 additions and 411 deletions.


@ -122,6 +122,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tcl_ring_retry = true,
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_dual_stations = false,
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
@ -205,6 +206,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
.support_dual_stations = false,
},
{
.name = "qca6390 hw2.0",
@ -255,7 +257,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.coldboot_cal_ftm = false,
.cbcal_restart_fw = false,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_vdevs = 2 + 1,
.num_peers = 512,
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
@ -290,6 +292,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
.support_dual_stations = true,
},
{
.name = "qcn9074 hw1.0",
@ -372,6 +375,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
.support_dual_stations = false,
},
{
.name = "wcn6855 hw2.0",
@ -422,7 +426,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.coldboot_cal_ftm = false,
.cbcal_restart_fw = false,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_vdevs = 2 + 1,
.num_peers = 512,
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
@ -457,6 +461,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
.support_dual_stations = true,
},
{
.name = "wcn6855 hw2.1",
@ -505,7 +510,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.coldboot_cal_ftm = false,
.cbcal_restart_fw = false,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_vdevs = 2 + 1,
.num_peers = 512,
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
@ -540,6 +545,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
.support_dual_stations = true,
},
{
.name = "wcn6750 hw1.0",
@ -621,6 +627,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE_WCN6750,
.smp2p_wow_exit = true,
.support_fw_mac_sequence = true,
.support_dual_stations = false,
},
{
.hw_rev = ATH11K_HW_IPQ5018_HW10,
@ -702,6 +709,93 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
.support_dual_stations = false,
},
{
.name = "qca2066 hw2.1",
.hw_rev = ATH11K_HW_QCA2066_HW21,
.fw = {
.dir = "QCA2066/hw2.1",
.board_size = 256 * 1024,
.cal_offset = 128 * 1024,
},
.max_radios = 3,
.bdf_addr = 0x4B0C0000,
.hw_ops = &wcn6855_ops,
.ring_mask = &ath11k_hw_ring_mask_qca6390,
.internal_sleep_clock = true,
.regs = &wcn6855_regs,
.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
.host_ce_config = ath11k_host_ce_config_qca6390,
.ce_count = 9,
.target_ce_config = ath11k_target_ce_config_wlan_qca6390,
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
.svc_to_ce_map_len = 14,
.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = true,
.rxdma1_enable = false,
.num_rxmda_per_pdev = 2,
.rx_mac_buf_ring = true,
.vdev_start_delay = true,
.htt_peer_map_v2 = false,
.spectral = {
.fft_sz = 0,
.fft_pad_sz = 0,
.summary_pad_sz = 0,
.fft_hdr_len = 0,
.max_fft_bins = 0,
.fragment_160mhz = false,
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP),
.supports_monitor = false,
.full_monitor_mode = false,
.supports_shadow_regs = true,
.idle_ps = true,
.supports_sta_ps = true,
.coldboot_cal_mm = false,
.coldboot_cal_ftm = false,
.cbcal_restart_fw = false,
.fw_mem_mode = 0,
.num_vdevs = 2 + 1,
.num_peers = 512,
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
.supports_regdb = true,
.fix_l1ss = false,
.credit_flow = true,
.max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
.hal_params = &ath11k_hw_hal_params_qca6390,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = false,
.supports_rssi_stats = true,
.fw_wmi_diag_event = true,
.current_cc_support = true,
.dbr_debug_support = false,
.global_reset = true,
.bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
.m3_fw_support = true,
.fixed_bdf_addr = false,
.fixed_mem_region = false,
.static_window_map = false,
.hybrid_bus_type = false,
.fixed_fw_mem = false,
.support_off_channel_tx = true,
.supports_multi_bssid = true,
.sram_dump = {
.start = 0x01400000,
.end = 0x0177ffff,
},
.tcl_ring_retry = true,
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
.support_dual_stations = true,
},
};


@ -147,6 +147,7 @@ enum ath11k_hw_rev {
ATH11K_HW_WCN6855_HW21,
ATH11K_HW_WCN6750_HW10,
ATH11K_HW_IPQ5018_HW10,
ATH11K_HW_QCA2066_HW21,
};
enum ath11k_firmware_mode {


@ -58,7 +58,7 @@ static void ath11k_hw_wcn6855_tx_mesh_enable(struct ath11k_base *ab,
static void ath11k_init_wmi_config_qca6390(struct ath11k_base *ab,
struct target_resource_config *config)
{
config->num_vdevs = 4;
config->num_vdevs = ab->hw_params.num_vdevs;
config->num_peers = 16;
config->num_tids = 32;


@ -226,6 +226,7 @@ struct ath11k_hw_params {
u32 tx_ring_size;
bool smp2p_wow_exit;
bool support_fw_mac_sequence;
bool support_dual_stations;
};
struct ath11k_hw_ops {


@ -2294,6 +2294,8 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
mcs_160_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
mcs_80_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
/* Initialize rx_mcs_160 to 9 which is an invalid value */
rx_mcs_160 = 9;
if (support_160) {
for (i = 7; i >= 0; i--) {
u8 mcs_160 = (mcs_160_map >> (2 * i)) & 3;
@ -2305,6 +2307,8 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
}
}
/* Initialize rx_mcs_80 to 9 which is an invalid value */
rx_mcs_80 = 9;
for (i = 7; i >= 0; i--) {
u8 mcs_80 = (mcs_80_map >> (2 * i)) & 3;
@ -3023,7 +3027,14 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
rcu_read_unlock();
if (!ath11k_mac_vif_recalc_sta_he_txbf(ar, vif, &he_cap)) {
ath11k_warn(ar->ab, "failed to recalc he txbf for vdev %i on bss %pM\n",
arvif->vdev_id, bss_conf->bssid);
return;
}
peer_arg.is_assoc = true;
ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
if (ret) {
ath11k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n",
@ -3046,12 +3057,6 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
return;
}
if (!ath11k_mac_vif_recalc_sta_he_txbf(ar, vif, &he_cap)) {
ath11k_warn(ar->ab, "failed to recalc he txbf for vdev %i on bss %pM\n",
arvif->vdev_id, bss_conf->bssid);
return;
}
WARN_ON(arvif->is_up);
arvif->aid = vif->cfg.aid;
@ -4008,7 +4013,7 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
req->ssids[i].ssid_len);
}
} else {
arg->scan_flags |= WMI_SCAN_FLAG_PASSIVE;
arg->scan_f_passive = 1;
}
if (req->n_channels) {
@ -7585,14 +7590,15 @@ void ath11k_mac_fill_reg_tpc_info(struct ath11k *ar,
struct ieee80211_chanctx_conf *ctx)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif = (void *)vif->drv_priv;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
struct ath11k_reg_tpc_power_info *reg_tpc_info = &arvif->reg_tpc_info;
struct ieee80211_channel *chan, *temp_chan;
u8 pwr_lvl_idx, num_pwr_levels, pwr_reduction;
bool is_psd_power = false, is_tpe_present = false;
s8 max_tx_power[IEEE80211_MAX_NUM_PWR_LEVEL],
psd_power, tx_power, eirp_power;
psd_power, tx_power;
s8 eirp_power = 0;
u16 start_freq, center_freq;
chan = ctx->def.chan;
@ -7758,7 +7764,7 @@ static void ath11k_mac_parse_tx_pwr_env(struct ath11k *ar,
struct ieee80211_chanctx_conf *ctx)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif = (void *)vif->drv_priv;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
struct ieee80211_tx_pwr_env *single_tpe;
enum wmi_reg_6ghz_client_type client_type;
@ -9246,8 +9252,8 @@ static int ath11k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
arg->dwell_time_active = scan_time_msec;
arg->dwell_time_passive = scan_time_msec;
arg->max_scan_time = scan_time_msec;
arg->scan_flags |= WMI_SCAN_FLAG_PASSIVE;
arg->scan_flags |= WMI_SCAN_FILTER_PROBE_REQ;
arg->scan_f_passive = 1;
arg->scan_f_filter_prb_req = 1;
arg->burst_duration = duration;
ret = ath11k_start_scan(ar, arg);
@ -9818,6 +9824,33 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
return 0;
}
static void ath11k_mac_setup_mac_address_list(struct ath11k *ar)
{
struct mac_address *addresses;
u16 n_addresses;
int i;
if (!ar->ab->hw_params.support_dual_stations)
return;
n_addresses = ar->ab->hw_params.num_vdevs;
addresses = kcalloc(n_addresses, sizeof(*addresses), GFP_KERNEL);
if (!addresses)
return;
memcpy(addresses[0].addr, ar->mac_addr, ETH_ALEN);
for (i = 1; i < n_addresses; i++) {
memcpy(addresses[i].addr, ar->mac_addr, ETH_ALEN);
/* set Local Administered Address bit */
addresses[i].addr[0] |= 0x2;
addresses[i].addr[0] += (i - 1) << 4;
}
ar->hw->wiphy->addresses = addresses;
ar->hw->wiphy->n_addresses = n_addresses;
}
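/* Worked example (illustrative only, not part of this patch; base MAC chosen
 * arbitrarily): with ar->mac_addr = 00:aa:bb:cc:dd:ee and num_vdevs = 2 + 1,
 * the list built above becomes
 *   addresses[0] = 00:aa:bb:cc:dd:ee   (permanent address)
 *   addresses[1] = 02:aa:bb:cc:dd:ee   (locally administered bit set)
 *   addresses[2] = 12:aa:bb:cc:dd:ee   (LA bit set, first octet += 1 << 4)
 * so each concurrent station vdev can be given its own address.
 */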
static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
{
struct ath11k_base *ab = ar->ab;
@ -9837,28 +9870,46 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
return -ENOMEM;
}
limits[0].max = 1;
limits[0].types |= BIT(NL80211_IFTYPE_STATION);
if (ab->hw_params.support_dual_stations) {
limits[0].max = 2;
limits[0].types |= BIT(NL80211_IFTYPE_STATION);
limits[1].max = 16;
limits[1].types |= BIT(NL80211_IFTYPE_AP);
limits[1].max = 1;
limits[1].types |= BIT(NL80211_IFTYPE_AP);
if (IS_ENABLED(CONFIG_MAC80211_MESH) &&
ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
if (IS_ENABLED(CONFIG_MAC80211_MESH) &&
ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
combinations[0].limits = limits;
combinations[0].n_limits = 2;
combinations[0].max_interfaces = ab->hw_params.num_vdevs;
combinations[0].num_different_channels = 2;
combinations[0].beacon_int_infra_match = true;
combinations[0].beacon_int_min_gcd = 100;
} else {
limits[0].max = 1;
limits[0].types |= BIT(NL80211_IFTYPE_STATION);
combinations[0].limits = limits;
combinations[0].n_limits = n_limits;
combinations[0].max_interfaces = 16;
combinations[0].num_different_channels = 1;
combinations[0].beacon_int_infra_match = true;
combinations[0].beacon_int_min_gcd = 100;
combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80) |
BIT(NL80211_CHAN_WIDTH_80P80) |
BIT(NL80211_CHAN_WIDTH_160);
limits[1].max = 16;
limits[1].types |= BIT(NL80211_IFTYPE_AP);
if (IS_ENABLED(CONFIG_MAC80211_MESH) &&
ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
combinations[0].limits = limits;
combinations[0].n_limits = 2;
combinations[0].max_interfaces = 16;
combinations[0].num_different_channels = 1;
combinations[0].beacon_int_infra_match = true;
combinations[0].beacon_int_min_gcd = 100;
combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80) |
BIT(NL80211_CHAN_WIDTH_80P80) |
BIT(NL80211_CHAN_WIDTH_160);
}
ar->hw->wiphy->iface_combinations = combinations;
ar->hw->wiphy->n_iface_combinations = 1;
@ -9923,6 +9974,8 @@ static void __ath11k_mac_unregister(struct ath11k *ar)
kfree(ar->hw->wiphy->iface_combinations[0].limits);
kfree(ar->hw->wiphy->iface_combinations);
kfree(ar->hw->wiphy->addresses);
SET_IEEE80211_DEV(ar->hw, NULL);
}
@ -9965,6 +10018,7 @@ static int __ath11k_mac_register(struct ath11k *ar)
ath11k_pdev_caps_update(ar);
SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
ath11k_mac_setup_mac_address_list(ar);
SET_IEEE80211_DEV(ar->hw, ab->dev);


@ -443,6 +443,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
case ATH11K_HW_QCA6390_HW20:
case ATH11K_HW_WCN6855_HW20:
case ATH11K_HW_WCN6855_HW21:
case ATH11K_HW_QCA2066_HW21:
ath11k_mhi_config = &ath11k_mhi_config_qca6390;
break;
default:


@ -29,6 +29,8 @@
#define QCN9074_DEVICE_ID 0x1104
#define WCN6855_DEVICE_ID 0x1103
#define TCSR_SOC_HW_SUB_VER 0x1910010
static const struct pci_device_id ath11k_pci_id_table[] = {
{ PCI_VDEVICE(QCOM, QCA6390_DEVICE_ID) },
{ PCI_VDEVICE(QCOM, WCN6855_DEVICE_ID) },
@ -742,8 +744,8 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
struct ath11k_base *ab;
struct ath11k_pci *ab_pci;
u32 soc_hw_version_major, soc_hw_version_minor, addr;
const struct ath11k_pci_ops *pci_ops;
int ret;
u32 sub_version;
ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI);
@ -788,6 +790,12 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
switch (pci_dev->device) {
case QCA6390_DEVICE_ID:
ret = ath11k_pcic_register_pci_ops(ab, &ath11k_pci_ops_qca6390);
if (ret) {
ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
goto err_pci_free_region;
}
ath11k_pci_read_hw_version(ab, &soc_hw_version_major,
&soc_hw_version_minor);
switch (soc_hw_version_major) {
@ -801,13 +809,21 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
goto err_pci_free_region;
}
pci_ops = &ath11k_pci_ops_qca6390;
break;
case QCN9074_DEVICE_ID:
pci_ops = &ath11k_pci_ops_qcn9074;
ret = ath11k_pcic_register_pci_ops(ab, &ath11k_pci_ops_qcn9074);
if (ret) {
ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
goto err_pci_free_region;
}
ab->hw_rev = ATH11K_HW_QCN9074_HW10;
break;
case WCN6855_DEVICE_ID:
ret = ath11k_pcic_register_pci_ops(ab, &ath11k_pci_ops_qca6390);
if (ret) {
ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
goto err_pci_free_region;
}
ab->id.bdf_search = ATH11K_BDF_SEARCH_BUS_AND_BOARD;
ath11k_pci_read_hw_version(ab, &soc_hw_version_major,
&soc_hw_version_minor);
@ -820,7 +836,19 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
break;
case 0x10:
case 0x11:
ab->hw_rev = ATH11K_HW_WCN6855_HW21;
sub_version = ath11k_pcic_read32(ab, TCSR_SOC_HW_SUB_VER);
ath11k_dbg(ab, ATH11K_DBG_PCI, "sub_version 0x%x\n",
sub_version);
switch (sub_version) {
case 0x1019A0E1:
case 0x1019B0E1:
case 0x1019C0E1:
case 0x1019D0E1:
ab->hw_rev = ATH11K_HW_QCA2066_HW21;
break;
default:
ab->hw_rev = ATH11K_HW_WCN6855_HW21;
}
break;
default:
goto unsupported_wcn6855_soc;
@ -834,7 +862,6 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
goto err_pci_free_region;
}
pci_ops = &ath11k_pci_ops_qca6390;
break;
default:
dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
@ -843,12 +870,6 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
goto err_pci_free_region;
}
ret = ath11k_pcic_register_pci_ops(ab, pci_ops);
if (ret) {
ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
goto err_pci_free_region;
}
ret = ath11k_pcic_init_msi_config(ab);
if (ret) {
ath11k_err(ab, "failed to init msi config: %d\n", ret);


@ -115,6 +115,17 @@ static const struct ath11k_msi_config ath11k_msi_config[] = {
},
.hw_rev = ATH11K_HW_WCN6750_HW10,
},
{
.total_vectors = 32,
.total_users = 4,
.users = (struct ath11k_msi_user[]) {
{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
},
.hw_rev = ATH11K_HW_QCA2066_HW21,
},
};
int ath11k_pcic_init_msi_config(struct ath11k_base *ab)


@ -2098,7 +2098,7 @@ void ath11k_wmi_start_scan_init(struct ath11k *ar,
WMI_SCAN_EVENT_BSS_CHANNEL |
WMI_SCAN_EVENT_FOREIGN_CHAN |
WMI_SCAN_EVENT_DEQUEUED;
arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
arg->scan_f_chan_stat_evnt = 1;
if (test_bit(WMI_TLV_SERVICE_PASSIVE_SCAN_START_TIME_ENHANCE,
ar->ab->wmi_ab.svc_map))


@ -3363,24 +3363,19 @@ struct scan_req_params {
u32 vdev_id;
u32 pdev_id;
enum wmi_scan_priority scan_priority;
union {
struct {
u32 scan_ev_started:1,
scan_ev_completed:1,
scan_ev_bss_chan:1,
scan_ev_foreign_chan:1,
scan_ev_dequeued:1,
scan_ev_preempted:1,
scan_ev_start_failed:1,
scan_ev_restarted:1,
scan_ev_foreign_chn_exit:1,
scan_ev_invalid:1,
scan_ev_gpio_timeout:1,
scan_ev_suspended:1,
scan_ev_resumed:1;
};
u32 scan_events;
};
u32 scan_ev_started:1,
scan_ev_completed:1,
scan_ev_bss_chan:1,
scan_ev_foreign_chan:1,
scan_ev_dequeued:1,
scan_ev_preempted:1,
scan_ev_start_failed:1,
scan_ev_restarted:1,
scan_ev_foreign_chn_exit:1,
scan_ev_invalid:1,
scan_ev_gpio_timeout:1,
scan_ev_suspended:1,
scan_ev_resumed:1;
u32 scan_ctrl_flags_ext;
u32 dwell_time_active;
u32 dwell_time_active_2g;
@ -3394,36 +3389,31 @@ struct scan_req_params {
u32 idle_time;
u32 max_scan_time;
u32 probe_delay;
union {
struct {
u32 scan_f_passive:1,
scan_f_bcast_probe:1,
scan_f_cck_rates:1,
scan_f_ofdm_rates:1,
scan_f_chan_stat_evnt:1,
scan_f_filter_prb_req:1,
scan_f_bypass_dfs_chn:1,
scan_f_continue_on_err:1,
scan_f_offchan_mgmt_tx:1,
scan_f_offchan_data_tx:1,
scan_f_promisc_mode:1,
scan_f_capture_phy_err:1,
scan_f_strict_passive_pch:1,
scan_f_half_rate:1,
scan_f_quarter_rate:1,
scan_f_force_active_dfs_chn:1,
scan_f_add_tpc_ie_in_probe:1,
scan_f_add_ds_ie_in_probe:1,
scan_f_add_spoofed_mac_in_probe:1,
scan_f_add_rand_seq_in_probe:1,
scan_f_en_ie_whitelist_in_probe:1,
scan_f_forced:1,
scan_f_2ghz:1,
scan_f_5ghz:1,
scan_f_80mhz:1;
};
u32 scan_flags;
};
u32 scan_f_passive:1,
scan_f_bcast_probe:1,
scan_f_cck_rates:1,
scan_f_ofdm_rates:1,
scan_f_chan_stat_evnt:1,
scan_f_filter_prb_req:1,
scan_f_bypass_dfs_chn:1,
scan_f_continue_on_err:1,
scan_f_offchan_mgmt_tx:1,
scan_f_offchan_data_tx:1,
scan_f_promisc_mode:1,
scan_f_capture_phy_err:1,
scan_f_strict_passive_pch:1,
scan_f_half_rate:1,
scan_f_quarter_rate:1,
scan_f_force_active_dfs_chn:1,
scan_f_add_tpc_ie_in_probe:1,
scan_f_add_ds_ie_in_probe:1,
scan_f_add_spoofed_mac_in_probe:1,
scan_f_add_rand_seq_in_probe:1,
scan_f_en_ie_whitelist_in_probe:1,
scan_f_forced:1,
scan_f_2ghz:1,
scan_f_5ghz:1,
scan_f_80mhz:1;
enum scan_dwelltime_adaptive_mode adaptive_dwell_time_mode;
u32 burst_duration;
u32 num_chan;


@ -19,7 +19,9 @@ ath12k-y += core.o \
hw.o \
mhi.o \
pci.o \
dp_mon.o
dp_mon.o \
fw.o \
p2p.o
ath12k-$(CONFIG_ATH12K_TRACING) += trace.o


@ -14,6 +14,7 @@
#include "dp_rx.h"
#include "debug.h"
#include "hif.h"
#include "fw.h"
unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
@ -509,6 +510,33 @@ int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd
return ret;
}
u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab)
{
if (ab->num_radios == 2)
return TARGET_NUM_STATIONS_DBS;
else if (ab->num_radios == 3)
return TARGET_NUM_PEERS_PDEV_DBS_SBS;
return TARGET_NUM_STATIONS_SINGLE;
}
u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab)
{
if (ab->num_radios == 2)
return TARGET_NUM_PEERS_PDEV_DBS;
else if (ab->num_radios == 3)
return TARGET_NUM_PEERS_PDEV_DBS_SBS;
return TARGET_NUM_PEERS_PDEV_SINGLE;
}
u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab)
{
if (ab->num_radios == 2)
return TARGET_NUM_TIDS(DBS);
else if (ab->num_radios == 3)
return TARGET_NUM_TIDS(DBS_SBS);
return TARGET_NUM_TIDS(SINGLE);
}
static void ath12k_core_stop(struct ath12k_base *ab)
{
if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
@ -720,6 +748,8 @@ static int ath12k_core_start(struct ath12k_base *ab,
goto err_mac_destroy;
}
ath12k_dp_hal_rx_desc_init(ab);
ret = ath12k_wmi_cmd_init(ab);
if (ret) {
ath12k_err(ab, "failed to send wmi init cmd: %d\n", ret);
@ -879,21 +909,29 @@ static void ath12k_rfkill_work(struct work_struct *work)
{
struct ath12k_base *ab = container_of(work, struct ath12k_base, rfkill_work);
struct ath12k *ar;
struct ath12k_hw *ah;
struct ieee80211_hw *hw;
bool rfkill_radio_on;
int i;
int i, j;
spin_lock_bh(&ab->base_lock);
rfkill_radio_on = ab->rfkill_radio_on;
spin_unlock_bh(&ab->base_lock);
for (i = 0; i < ab->num_radios; i++) {
ar = ab->pdevs[i].ar;
if (!ar)
for (i = 0; i < ab->num_hw; i++) {
ah = ab->ah[i];
if (!ah)
continue;
hw = ath12k_ar_to_hw(ar);
ath12k_mac_rfkill_enable_radio(ar, rfkill_radio_on);
for (j = 0; j < ah->num_radio; j++) {
ar = &ah->radio[j];
if (!ar)
continue;
ath12k_mac_rfkill_enable_radio(ar, rfkill_radio_on);
}
hw = ah->hw;
wiphy_rfkill_set_hw_state(hw->wiphy, !rfkill_radio_on);
}
}
@ -950,6 +988,7 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
ath12k_mac_drain_tx(ar);
complete(&ar->scan.started);
complete(&ar->scan.completed);
complete(&ar->scan.on_channel);
complete(&ar->peer_assoc_done);
complete(&ar->peer_delete_done);
complete(&ar->install_key_done);
@ -1109,6 +1148,8 @@ int ath12k_core_pre_init(struct ath12k_base *ab)
return ret;
}
ath12k_fw_map(ab);
return 0;
}
@ -1137,6 +1178,7 @@ void ath12k_core_deinit(struct ath12k_base *ab)
ath12k_hif_power_down(ab);
ath12k_mac_destroy(ab);
ath12k_core_soc_destroy(ab);
ath12k_fw_unmap(ab);
}
void ath12k_core_free(struct ath12k_base *ab)
@ -1185,6 +1227,7 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
ab->dev = dev;
ab->hif.bus = bus;
ab->qmi.num_radios = U8_MAX;
ab->slo_capable = true;
return ab;


@ -13,6 +13,7 @@
#include <linux/bitfield.h>
#include <linux/dmi.h>
#include <linux/ctype.h>
#include <linux/firmware.h>
#include "qmi.h"
#include "htc.h"
#include "wmi.h"
@ -24,6 +25,7 @@
#include "hal_rx.h"
#include "reg.h"
#include "dbring.h"
#include "fw.h"
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@ -264,6 +266,7 @@ struct ath12k_vif {
u8 tx_encap_type;
u8 vdev_stats_id;
u32 punct_bitmap;
bool ps;
};
struct ath12k_vif_iter {
@ -824,6 +827,27 @@ struct ath12k_base {
u32 subsystem_device;
} id;
struct {
u32 api_version;
const struct firmware *fw;
const u8 *amss_data;
size_t amss_len;
const u8 *amss_dualmac_data;
size_t amss_dualmac_len;
const u8 *m3_data;
size_t m3_len;
DECLARE_BITMAP(fw_features, ATH12K_FW_FEATURE_COUNT);
} fw;
const struct hal_rx_ops *hal_rx_ops;
/* slo_capable denotes if the single/multi link operation
* is supported within the same chip (SoC).
*/
bool slo_capable;
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
};
@ -855,6 +879,9 @@ int ath12k_core_suspend(struct ath12k_base *ab);
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
const char *filename);
u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab);
u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab);
u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab);
static inline const char *ath12k_scan_state_str(enum ath12k_scan_state state)
{


@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <crypto/hash.h>
@ -997,6 +997,29 @@ void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab)
}
}
bool ath12k_dp_wmask_compaction_rx_tlv_supported(struct ath12k_base *ab)
{
if (test_bit(WMI_TLV_SERVICE_WMSK_COMPACTION_RX_TLVS, ab->wmi_ab.svc_map) &&
ab->hw_params->hal_ops->rxdma_ring_wmask_rx_mpdu_start &&
ab->hw_params->hal_ops->rxdma_ring_wmask_rx_msdu_end &&
ab->hw_params->hal_ops->get_hal_rx_compact_ops) {
return true;
}
return false;
}
void ath12k_dp_hal_rx_desc_init(struct ath12k_base *ab)
{
if (ath12k_dp_wmask_compaction_rx_tlv_supported(ab)) {
/* RX TLVS compaction is supported, hence change the hal_rx_ops
* to compact hal_rx_ops.
*/
ab->hal_rx_ops = ab->hw_params->hal_ops->get_hal_rx_compact_ops();
}
ab->hal.hal_desc_sz =
ab->hal_rx_ops->rx_desc_get_desc_size();
}
static void ath12k_dp_service_mon_ring(struct timer_list *t)
{
struct ath12k_base *ab = from_timer(ab, t, mon_reap_timer);


@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_DP_H
@ -766,6 +766,11 @@ enum htt_stats_internal_ppdu_frametype {
#define HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET GENMASK(31, 16)
#define HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET GENMASK(15, 0)
#define HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACT_SET BIT(23)
#define HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_MASK GENMASK(15, 0)
#define HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_MASK GENMASK(18, 16)
#define HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_MASK GENMASK(16, 0)
enum htt_rx_filter_tlv_flags {
HTT_RX_FILTER_TLV_FLAGS_MPDU_START = BIT(0),
HTT_RX_FILTER_TLV_FLAGS_MSDU_START = BIT(1),
@ -1089,6 +1094,11 @@ struct htt_rx_ring_selection_cfg_cmd {
__le32 rx_mpdu_offset;
__le32 rx_msdu_offset;
__le32 rx_attn_offset;
__le32 info2;
__le32 reserved[2];
__le32 rx_mpdu_start_end_mask;
__le32 rx_msdu_end_word_mask;
__le32 info3;
} __packed;
struct htt_rx_ring_tlv_filter {
@ -1105,6 +1115,9 @@ struct htt_rx_ring_tlv_filter {
u16 rx_msdu_end_offset;
u16 rx_msdu_start_offset;
u16 rx_attn_offset;
u16 rx_mpdu_start_wmask;
u16 rx_mpdu_end_wmask;
u32 rx_msdu_end_wmask;
};
#define HTT_STATS_FRAME_CTRL_TYPE_MGMT 0x0
@ -1821,4 +1834,6 @@ struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
u32 cookie);
struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab,
u32 desc_id);
bool ath12k_dp_wmask_compaction_rx_tlv_supported(struct ath12k_base *ab);
void ath12k_dp_hal_rx_desc_init(struct ath12k_base *ab);
#endif


@ -864,7 +864,7 @@ static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k *ar, struct sk_buff
{
u32 rx_pkt_offset, l2_hdr_offset;
rx_pkt_offset = ar->ab->hw_params->hal_desc_sz;
rx_pkt_offset = ar->ab->hal.hal_desc_sz;
l2_hdr_offset = ath12k_dp_rx_h_l3pad(ar->ab,
(struct hal_rx_desc *)msdu->data);
skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
@ -917,7 +917,8 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
u8 qos_pkt = 0;
rx_desc = (struct hal_rx_desc *)head_msdu->data;
hdr_desc = ab->hw_params->hal_ops->rx_desc_get_msdu_payload(rx_desc);
hdr_desc =
ab->hal_rx_ops->rx_desc_get_msdu_payload(rx_desc);
/* Base size */
wh = (struct ieee80211_hdr_3addr *)hdr_desc;


@ -23,34 +23,34 @@
static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
if (!ab->hw_params->hal_ops->rx_desc_encrypt_valid(desc))
if (!ab->hal_rx_ops->rx_desc_encrypt_valid(desc))
return HAL_ENCRYPT_TYPE_OPEN;
return ab->hw_params->hal_ops->rx_desc_get_encrypt_type(desc);
return ab->hal_rx_ops->rx_desc_get_encrypt_type(desc);
}
u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_decap_type(desc);
return ab->hal_rx_ops->rx_desc_get_decap_type(desc);
}
static u8 ath12k_dp_rx_h_mesh_ctl_present(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_mesh_ctl(desc);
return ab->hal_rx_ops->rx_desc_get_mesh_ctl(desc);
}
static bool ath12k_dp_rx_h_seq_ctrl_valid(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
return ab->hal_rx_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
}
static bool ath12k_dp_rx_h_fc_valid(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_mpdu_fc_valid(desc);
return ab->hal_rx_ops->rx_desc_get_mpdu_fc_valid(desc);
}
static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab,
@ -58,7 +58,7 @@ static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab,
{
struct ieee80211_hdr *hdr;
hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params->hal_desc_sz);
hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz);
return ieee80211_has_morefrags(hdr->frame_control);
}
@ -67,156 +67,156 @@ static u16 ath12k_dp_rx_h_frag_no(struct ath12k_base *ab,
{
struct ieee80211_hdr *hdr;
hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params->hal_desc_sz);
hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz);
return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}
static u16 ath12k_dp_rx_h_seq_no(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_mpdu_start_seq_no(desc);
return ab->hal_rx_ops->rx_desc_get_mpdu_start_seq_no(desc);
}
static bool ath12k_dp_rx_h_msdu_done(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->dp_rx_h_msdu_done(desc);
return ab->hal_rx_ops->dp_rx_h_msdu_done(desc);
}
static bool ath12k_dp_rx_h_l4_cksum_fail(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->dp_rx_h_l4_cksum_fail(desc);
return ab->hal_rx_ops->dp_rx_h_l4_cksum_fail(desc);
}
static bool ath12k_dp_rx_h_ip_cksum_fail(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->dp_rx_h_ip_cksum_fail(desc);
return ab->hal_rx_ops->dp_rx_h_ip_cksum_fail(desc);
}
static bool ath12k_dp_rx_h_is_decrypted(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->dp_rx_h_is_decrypted(desc);
return ab->hal_rx_ops->dp_rx_h_is_decrypted(desc);
}
u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->dp_rx_h_mpdu_err(desc);
return ab->hal_rx_ops->dp_rx_h_mpdu_err(desc);
}
static u16 ath12k_dp_rx_h_msdu_len(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_msdu_len(desc);
return ab->hal_rx_ops->rx_desc_get_msdu_len(desc);
}
static u8 ath12k_dp_rx_h_sgi(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_msdu_sgi(desc);
return ab->hal_rx_ops->rx_desc_get_msdu_sgi(desc);
}
static u8 ath12k_dp_rx_h_rate_mcs(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_msdu_rate_mcs(desc);
return ab->hal_rx_ops->rx_desc_get_msdu_rate_mcs(desc);
}
static u8 ath12k_dp_rx_h_rx_bw(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_msdu_rx_bw(desc);
return ab->hal_rx_ops->rx_desc_get_msdu_rx_bw(desc);
}
static u32 ath12k_dp_rx_h_freq(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_msdu_freq(desc);
return ab->hal_rx_ops->rx_desc_get_msdu_freq(desc);
}
static u8 ath12k_dp_rx_h_pkt_type(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_msdu_pkt_type(desc);
return ab->hal_rx_ops->rx_desc_get_msdu_pkt_type(desc);
}
static u8 ath12k_dp_rx_h_nss(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return hweight8(ab->hw_params->hal_ops->rx_desc_get_msdu_nss(desc));
return hweight8(ab->hal_rx_ops->rx_desc_get_msdu_nss(desc));
}
static u8 ath12k_dp_rx_h_tid(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_mpdu_tid(desc);
return ab->hal_rx_ops->rx_desc_get_mpdu_tid(desc);
}
static u16 ath12k_dp_rx_h_peer_id(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_mpdu_peer_id(desc);
return ab->hal_rx_ops->rx_desc_get_mpdu_peer_id(desc);
}
u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_l3_pad_bytes(desc);
return ab->hal_rx_ops->rx_desc_get_l3_pad_bytes(desc);
}
static bool ath12k_dp_rx_h_first_msdu(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_first_msdu(desc);
return ab->hal_rx_ops->rx_desc_get_first_msdu(desc);
}
static bool ath12k_dp_rx_h_last_msdu(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_last_msdu(desc);
return ab->hal_rx_ops->rx_desc_get_last_msdu(desc);
}
static void ath12k_dp_rx_desc_end_tlv_copy(struct ath12k_base *ab,
struct hal_rx_desc *fdesc,
struct hal_rx_desc *ldesc)
{
ab->hw_params->hal_ops->rx_desc_copy_end_tlv(fdesc, ldesc);
ab->hal_rx_ops->rx_desc_copy_end_tlv(fdesc, ldesc);
}
static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab,
struct hal_rx_desc *desc,
u16 len)
{
ab->hw_params->hal_ops->rx_desc_set_msdu_len(desc, len);
ab->hal_rx_ops->rx_desc_set_msdu_len(desc, len);
}
static bool ath12k_dp_rx_h_is_da_mcbc(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return (ath12k_dp_rx_h_first_msdu(ab, desc) &&
ab->hw_params->hal_ops->rx_desc_is_da_mcbc(desc));
ab->hal_rx_ops->rx_desc_is_da_mcbc(desc));
}
static bool ath12k_dp_rxdesc_mac_addr2_valid(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_mac_addr2_valid(desc);
return ab->hal_rx_ops->rx_desc_mac_addr2_valid(desc);
}
static u8 *ath12k_dp_rxdesc_get_mpdu_start_addr2(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_mpdu_start_addr2(desc);
return ab->hal_rx_ops->rx_desc_mpdu_start_addr2(desc);
}
static void ath12k_dp_rx_desc_get_dot11_hdr(struct ath12k_base *ab,
struct hal_rx_desc *desc,
struct ieee80211_hdr *hdr)
{
ab->hw_params->hal_ops->rx_desc_get_dot11_hdr(desc, hdr);
ab->hal_rx_ops->rx_desc_get_dot11_hdr(desc, hdr);
}
static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab,
@ -224,13 +224,19 @@ static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab,
u8 *crypto_hdr,
enum hal_encrypt_type enctype)
{
ab->hw_params->hal_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
ab->hal_rx_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
}
static u16 ath12k_dp_rxdesc_get_mpdu_frame_ctrl(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_mpdu_frame_ctl(desc);
return ab->hal_rx_ops->rx_desc_get_mpdu_frame_ctl(desc);
}
static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hal_rx_ops->rx_desc_get_msdu_src_link_id(desc);
}
static int ath12k_dp_purge_mon_ring(struct ath12k_base *ab)
@ -1761,7 +1767,7 @@ static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
int buf_first_hdr_len, buf_first_len;
struct hal_rx_desc *ldesc;
int space_extra, rem_len, buf_len;
u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
/* As the msdu is spread across multiple rx buffers,
* find the offset to the start of msdu for computing
@ -2473,7 +2479,7 @@ static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
u8 l3_pad_bytes;
u16 msdu_len;
int ret;
u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
last_buf = ath12k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
if (!last_buf) {
@ -2804,7 +2810,7 @@ static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer
u8 mic[IEEE80211_CCMP_MIC_LEN];
int head_len, tail_len, ret;
size_t data_len;
u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
u32 hdr_len, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
u8 *key, *data;
u8 key_idx;
@ -2854,7 +2860,7 @@ static void ath12k_dp_rx_h_undecap_frag(struct ath12k *ar, struct sk_buff *msdu,
struct ieee80211_hdr *hdr;
size_t hdr_len;
size_t crypto_len;
u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
if (!flags)
return;
@ -2892,7 +2898,7 @@ static int ath12k_dp_rx_h_defrag(struct ath12k *ar,
bool is_decrypted = false;
int msdu_len = 0;
int extra_space;
u32 flags, hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
u32 flags, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
first_frag = skb_peek(&rx_tid->rx_frags);
last_frag = skb_peek_tail(&rx_tid->rx_frags);
@ -2968,7 +2974,7 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
struct ath12k_rx_desc_info *desc_info;
u8 dst_ind;
hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
hal_rx_desc_sz = ab->hal.hal_desc_sz;
link_desc_banks = dp->link_desc_banks;
reo_dest_ring = rx_tid->dst_ring_desc;
@ -3122,7 +3128,7 @@ static u64 ath12k_dp_rx_h_get_pn(struct ath12k *ar, struct sk_buff *skb)
struct ieee80211_hdr *hdr;
u64 pn = 0;
u8 *ehdr;
u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
@ -3305,7 +3311,7 @@ ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
struct ath12k_skb_rxcb *rxcb;
struct hal_rx_desc *rx_desc;
u16 msdu_len;
u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
struct ath12k_rx_desc_info *desc_info;
u64 desc_va;
@ -3486,7 +3492,7 @@ static void ath12k_dp_rx_null_q_desc_sg_drop(struct ath12k *ar,
int n_buffs;
n_buffs = DIV_ROUND_UP(msdu_len,
(DP_RX_BUFFER_SIZE - ar->ab->hw_params->hal_desc_sz));
(DP_RX_BUFFER_SIZE - ar->ab->hal.hal_desc_sz));
skb_queue_walk_safe(msdu_list, skb, tmp) {
rxcb = ATH12K_SKB_RXCB(skb);
@ -3510,7 +3516,7 @@ static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
u8 l3pad_bytes;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
@ -3607,7 +3613,7 @@ static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
u8 l3pad_bytes;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, desc);
rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, desc);
@ -3695,16 +3701,15 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
struct hal_rx_wbm_rel_info err_info;
struct hal_srng *srng;
struct sk_buff *msdu;
struct sk_buff_head msdu_list[MAX_RADIOS];
struct sk_buff_head msdu_list;
struct ath12k_skb_rxcb *rxcb;
void *rx_desc;
int mac_id;
u8 mac_id;
int num_buffs_reaped = 0;
struct ath12k_rx_desc_info *desc_info;
int ret, i;
int ret, pdev_id;
for (i = 0; i < ab->num_radios; i++)
__skb_queue_head_init(&msdu_list[i]);
__skb_queue_head_init(&msdu_list);
srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
rx_ring = &dp->rx_refill_buf_ring;
@ -3737,11 +3742,6 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
}
}
/* FIXME: Extract mac id correctly. Since descs are not tied
* to mac, we can extract from vdev id in ring desc.
*/
mac_id = 0;
if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
ath12k_warn(ab, "WBM RX err, Check HW CC implementation");
@ -3771,7 +3771,8 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
rxcb->err_rel_src = err_info.err_rel_src;
rxcb->err_code = err_info.err_code;
rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
__skb_queue_tail(&msdu_list[mac_id], msdu);
__skb_queue_tail(&msdu_list, msdu);
rxcb->is_first_msdu = err_info.first_msdu;
rxcb->is_last_msdu = err_info.last_msdu;
@ -3788,21 +3789,22 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_buffs_reaped);
rcu_read_lock();
for (i = 0; i < ab->num_radios; i++) {
if (!rcu_dereference(ab->pdevs_active[i])) {
__skb_queue_purge(&msdu_list[i]);
while ((msdu = __skb_dequeue(&msdu_list))) {
mac_id = ath12k_dp_rx_get_msdu_src_link(ab,
(struct hal_rx_desc *)msdu->data);
pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
ar = ab->pdevs[pdev_id].ar;
if (!ar || !rcu_dereference(ar->ab->pdevs_active[mac_id])) {
dev_kfree_skb_any(msdu);
continue;
}
ar = ab->pdevs[i].ar;
if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
__skb_queue_purge(&msdu_list[i]);
dev_kfree_skb_any(msdu);
continue;
}
while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list);
}
rcu_read_unlock();
done:
@ -3922,7 +3924,7 @@ int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab)
struct htt_rx_ring_tlv_filter tlv_filter = {0};
u32 ring_id;
int ret;
u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
@ -3935,14 +3937,20 @@ int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab)
tlv_filter.rx_packet_offset = hal_rx_desc_sz;
tlv_filter.rx_mpdu_start_offset =
ab->hw_params->hal_ops->rx_desc_get_mpdu_start_offset();
ab->hal_rx_ops->rx_desc_get_mpdu_start_offset();
tlv_filter.rx_msdu_end_offset =
ab->hw_params->hal_ops->rx_desc_get_msdu_end_offset();
ab->hal_rx_ops->rx_desc_get_msdu_end_offset();
if (ath12k_dp_wmask_compaction_rx_tlv_supported(ab)) {
tlv_filter.rx_mpdu_start_wmask =
ab->hw_params->hal_ops->rxdma_ring_wmask_rx_mpdu_start();
tlv_filter.rx_msdu_end_wmask =
ab->hw_params->hal_ops->rxdma_ring_wmask_rx_msdu_end();
ath12k_dbg(ab, ATH12K_DBG_DATA,
"Configuring compact tlv masks rx_mpdu_start_wmask 0x%x rx_msdu_end_wmask 0x%x\n",
tlv_filter.rx_mpdu_start_wmask, tlv_filter.rx_msdu_end_wmask);
}
/* TODO: Selectively subscribe to required qwords within msdu_end
* and mpdu_start and setup the mask in below msg
* and modify the rx_desc struct
*/
ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, 0,
HAL_RXDMA_BUF,
DP_RXDMA_REFILL_RING_SIZE,
@ -3957,7 +3965,7 @@ int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
struct htt_rx_ring_tlv_filter tlv_filter = {0};
u32 ring_id;
int ret;
u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
int i;
ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
@ -3973,9 +3981,9 @@ int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
tlv_filter.rx_header_offset = offsetof(struct hal_rx_desc_wcn7850, pkt_hdr_tlv);
tlv_filter.rx_mpdu_start_offset =
ab->hw_params->hal_ops->rx_desc_get_mpdu_start_offset();
ab->hal_rx_ops->rx_desc_get_mpdu_start_offset();
tlv_filter.rx_msdu_end_offset =
ab->hw_params->hal_ops->rx_desc_get_msdu_end_offset();
ab->hal_rx_ops->rx_desc_get_msdu_end_offset();
/* TODO: Selectively subscribe to required qwords within msdu_end
* and mpdu_start and setup the mask in below msg


@ -964,6 +964,26 @@ int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id,
HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET);
}
if (tlv_filter->rx_mpdu_start_wmask > 0 &&
tlv_filter->rx_msdu_end_wmask > 0) {
cmd->info2 |=
le32_encode_bits(true,
HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACT_SET);
cmd->rx_mpdu_start_end_mask =
le32_encode_bits(tlv_filter->rx_mpdu_start_wmask,
HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_MASK);
/* mpdu_end is not used by any hardware so far;
 * assign it via hal ops in the future if a chip
 * starts using it.
 */
cmd->rx_mpdu_start_end_mask |=
le32_encode_bits(tlv_filter->rx_mpdu_end_wmask,
HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_MASK);
cmd->rx_msdu_end_word_mask =
le32_encode_bits(tlv_filter->rx_msdu_end_wmask,
HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_MASK);
}
ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
if (ret)
goto err_free;


@ -0,0 +1,171 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
#include "debug.h"
static int ath12k_fw_request_firmware_api_n(struct ath12k_base *ab,
const char *name)
{
size_t magic_len, len, ie_len;
int ie_id, i, index, bit, ret;
struct ath12k_fw_ie *hdr;
const u8 *data;
__le32 *timestamp;
ab->fw.fw = ath12k_core_firmware_request(ab, name);
if (IS_ERR(ab->fw.fw)) {
ret = PTR_ERR(ab->fw.fw);
ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to load %s: %d\n", name, ret);
ab->fw.fw = NULL;
return ret;
}
data = ab->fw.fw->data;
len = ab->fw.fw->size;
/* magic also includes the null byte, check that as well */
magic_len = strlen(ATH12K_FIRMWARE_MAGIC) + 1;
if (len < magic_len) {
ath12k_err(ab, "firmware image too small to contain magic: %zu\n",
len);
ret = -EINVAL;
goto err;
}
if (memcmp(data, ATH12K_FIRMWARE_MAGIC, magic_len) != 0) {
ath12k_err(ab, "Invalid firmware magic\n");
ret = -EINVAL;
goto err;
}
/* jump over the padding */
magic_len = ALIGN(magic_len, 4);
/* make sure there's space for padding */
if (magic_len > len) {
ath12k_err(ab, "No space for padding after magic\n");
ret = -EINVAL;
goto err;
}
len -= magic_len;
data += magic_len;
/* loop elements */
while (len > sizeof(struct ath12k_fw_ie)) {
hdr = (struct ath12k_fw_ie *)data;
ie_id = le32_to_cpu(hdr->id);
ie_len = le32_to_cpu(hdr->len);
len -= sizeof(*hdr);
data += sizeof(*hdr);
if (len < ie_len) {
ath12k_err(ab, "Invalid length for FW IE %d (%zu < %zu)\n",
ie_id, len, ie_len);
ret = -EINVAL;
goto err;
}
switch (ie_id) {
case ATH12K_FW_IE_TIMESTAMP:
if (ie_len != sizeof(u32))
break;
timestamp = (__le32 *)data;
ath12k_dbg(ab, ATH12K_DBG_BOOT, "found fw timestamp %d\n",
le32_to_cpup(timestamp));
break;
case ATH12K_FW_IE_FEATURES:
ath12k_dbg(ab, ATH12K_DBG_BOOT,
"found firmware features ie (%zd B)\n",
ie_len);
for (i = 0; i < ATH12K_FW_FEATURE_COUNT; i++) {
index = i / 8;
bit = i % 8;
if (index == ie_len)
break;
if (data[index] & (1 << bit))
__set_bit(i, ab->fw.fw_features);
}
ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "features", "",
ab->fw.fw_features,
sizeof(ab->fw.fw_features));
break;
case ATH12K_FW_IE_AMSS_IMAGE:
ath12k_dbg(ab, ATH12K_DBG_BOOT,
"found fw image ie (%zd B)\n",
ie_len);
ab->fw.amss_data = data;
ab->fw.amss_len = ie_len;
break;
case ATH12K_FW_IE_M3_IMAGE:
ath12k_dbg(ab, ATH12K_DBG_BOOT,
"found m3 image ie (%zd B)\n",
ie_len);
ab->fw.m3_data = data;
ab->fw.m3_len = ie_len;
break;
case ATH12K_FW_IE_AMSS_DUALMAC_IMAGE:
ath12k_dbg(ab, ATH12K_DBG_BOOT,
"found dualmac fw image ie (%zd B)\n",
ie_len);
ab->fw.amss_dualmac_data = data;
ab->fw.amss_dualmac_len = ie_len;
break;
default:
ath12k_warn(ab, "Unknown FW IE: %u\n", ie_id);
break;
}
/* jump over the padding */
ie_len = ALIGN(ie_len, 4);
/* make sure there's space for padding */
if (ie_len > len)
break;
len -= ie_len;
data += ie_len;
}
return 0;
err:
release_firmware(ab->fw.fw);
ab->fw.fw = NULL;
return ret;
}
void ath12k_fw_map(struct ath12k_base *ab)
{
int ret;
ret = ath12k_fw_request_firmware_api_n(ab, ATH12K_FW_API2_FILE);
if (ret == 0)
ab->fw.api_version = 2;
else
ab->fw.api_version = 1;
ath12k_dbg(ab, ATH12K_DBG_BOOT, "using fw api %d\n",
ab->fw.api_version);
}
void ath12k_fw_unmap(struct ath12k_base *ab)
{
release_firmware(ab->fw.fw);
memset(&ab->fw, 0, sizeof(ab->fw));
}


@ -0,0 +1,33 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_FW_H
#define ATH12K_FW_H
#define ATH12K_FW_API2_FILE "firmware-2.bin"
#define ATH12K_FIRMWARE_MAGIC "QCOM-ATH12K-FW"
enum ath12k_fw_ie_type {
ATH12K_FW_IE_TIMESTAMP = 0,
ATH12K_FW_IE_FEATURES = 1,
ATH12K_FW_IE_AMSS_IMAGE = 2,
ATH12K_FW_IE_M3_IMAGE = 3,
ATH12K_FW_IE_AMSS_DUALMAC_IMAGE = 4,
};
enum ath12k_fw_features {
/* The firmware supports setting the QRTR id via register
* PCIE_LOCAL_REG_QRTR_NODE_ID
*/
ATH12K_FW_FEATURE_MULTI_QRTR_ID = 0,
/* keep last */
ATH12K_FW_FEATURE_COUNT,
};
void ath12k_fw_map(struct ath12k_base *ab);
void ath12k_fw_unmap(struct ath12k_base *ab);
#endif /* ATH12K_FW_H */


@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/dma-mapping.h>
#include "hal_tx.h"
@ -449,8 +449,8 @@ static u8 *ath12k_hw_qcn9274_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
static bool ath12k_hw_qcn9274_rx_desc_is_da_mcbc(struct hal_rx_desc *desc)
{
return __le16_to_cpu(desc->u.qcn9274.msdu_end.info5) &
RX_MSDU_END_INFO5_DA_IS_MCBC;
return __le32_to_cpu(desc->u.qcn9274.mpdu_start.info6) &
RX_MPDU_START_INFO6_MCAST_BCAST;
}
static void ath12k_hw_qcn9274_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc,
@ -626,6 +626,21 @@ static int ath12k_hal_srng_create_config_qcn9274(struct ath12k_base *ab)
return 0;
}
static u16 ath12k_hal_qcn9274_rx_mpdu_start_wmask_get(void)
{
return QCN9274_MPDU_START_WMASK;
}
static u32 ath12k_hal_qcn9274_rx_msdu_end_wmask_get(void)
{
return QCN9274_MSDU_END_WMASK;
}
static const struct hal_rx_ops *ath12k_hal_qcn9274_get_hal_rx_compact_ops(void)
{
return &hal_rx_qcn9274_compact_ops;
}
static bool ath12k_hw_qcn9274_dp_rx_h_msdu_done(struct hal_rx_desc *desc)
{
return !!le32_get_bits(desc->u.qcn9274.msdu_end.info14,
@ -680,7 +695,17 @@ static u32 ath12k_hw_qcn9274_dp_rx_h_mpdu_err(struct hal_rx_desc *desc)
return errmap;
}
const struct hal_ops hal_qcn9274_ops = {
static u32 ath12k_hw_qcn9274_get_rx_desc_size(void)
{
return sizeof(struct hal_rx_desc_qcn9274);
}
static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_src_link(struct hal_rx_desc *desc)
{
return 0;
}
const struct hal_rx_ops hal_rx_qcn9274_ops = {
.rx_desc_get_first_msdu = ath12k_hw_qcn9274_rx_desc_get_first_msdu,
.rx_desc_get_last_msdu = ath12k_hw_qcn9274_rx_desc_get_last_msdu,
.rx_desc_get_l3_pad_bytes = ath12k_hw_qcn9274_rx_desc_get_l3_pad_bytes,
@ -712,13 +737,367 @@ const struct hal_ops hal_qcn9274_ops = {
.rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_rx_desc_get_dot11_hdr,
.rx_desc_get_crypto_header = ath12k_hw_qcn9274_rx_desc_get_crypto_hdr,
.rx_desc_get_mpdu_frame_ctl = ath12k_hw_qcn9274_rx_desc_get_mpdu_frame_ctl,
.create_srng_config = ath12k_hal_srng_create_config_qcn9274,
.tcl_to_wbm_rbm_map = ath12k_hal_qcn9274_tcl_to_wbm_rbm_map,
.dp_rx_h_msdu_done = ath12k_hw_qcn9274_dp_rx_h_msdu_done,
.dp_rx_h_l4_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_l4_cksum_fail,
.dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_ip_cksum_fail,
.dp_rx_h_is_decrypted = ath12k_hw_qcn9274_dp_rx_h_is_decrypted,
.dp_rx_h_mpdu_err = ath12k_hw_qcn9274_dp_rx_h_mpdu_err,
.rx_desc_get_desc_size = ath12k_hw_qcn9274_get_rx_desc_size,
.rx_desc_get_msdu_src_link_id = ath12k_hw_qcn9274_rx_desc_get_msdu_src_link,
};
static bool ath12k_hw_qcn9274_compact_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
{
return !!le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5,
RX_MSDU_END_INFO5_FIRST_MSDU);
}
static bool ath12k_hw_qcn9274_compact_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
{
return !!le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5,
RX_MSDU_END_INFO5_LAST_MSDU);
}
static u8 ath12k_hw_qcn9274_compact_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
{
return le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5,
RX_MSDU_END_INFO5_L3_HDR_PADDING);
}
static bool ath12k_hw_qcn9274_compact_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
{
return !!le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4,
RX_MPDU_START_INFO4_ENCRYPT_INFO_VALID);
}
static u32 ath12k_hw_qcn9274_compact_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info2,
RX_MPDU_START_INFO2_ENC_TYPE);
}
static u8 ath12k_hw_qcn9274_compact_rx_desc_get_decap_type(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info11,
RX_MSDU_END_INFO11_DECAP_FORMAT);
}
static u8 ath12k_hw_qcn9274_compact_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.qcn9274.msdu_end.info11,
RX_MSDU_END_INFO11_MESH_CTRL_PRESENT);
}
static bool
ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
{
return !!le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4,
RX_MPDU_START_INFO4_MPDU_SEQ_CTRL_VALID);
}
static bool ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
{
return !!le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4,
RX_MPDU_START_INFO4_MPDU_FCTRL_VALID);
}
static u16
ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4,
RX_MPDU_START_INFO4_MPDU_SEQ_NUM);
}
static u16 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info10,
RX_MSDU_END_INFO10_MSDU_LENGTH);
}
static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
RX_MSDU_END_INFO12_SGI);
}
static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
RX_MSDU_END_INFO12_RATE_MCS);
}
static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
RX_MSDU_END_INFO12_RECV_BW);
}
static u32 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
{
return __le32_to_cpu(desc->u.qcn9274_compact.msdu_end.phy_meta_data);
}
static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
RX_MSDU_END_INFO12_PKT_TYPE);
}
static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
RX_MSDU_END_QCN9274_INFO12_MIMO_SS_BITMAP);
}
static u8 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
{
return le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5,
RX_MSDU_END_QCN9274_INFO5_TID);
}
static u16 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
{
return __le16_to_cpu(desc->u.qcn9274_compact.mpdu_start.sw_peer_id);
}
static void ath12k_hw_qcn9274_compact_rx_desc_copy_end_tlv(struct hal_rx_desc *fdesc,
struct hal_rx_desc *ldesc)
{
fdesc->u.qcn9274_compact.msdu_end = ldesc->u.qcn9274_compact.msdu_end;
}
static u32 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
{
return __le16_to_cpu(desc->u.qcn9274_compact.mpdu_start.phy_ppdu_id);
}
static void
ath12k_hw_qcn9274_compact_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len)
{
u32 info = __le32_to_cpu(desc->u.qcn9274_compact.msdu_end.info10);
info = u32_replace_bits(info, len, RX_MSDU_END_INFO10_MSDU_LENGTH);
desc->u.qcn9274_compact.msdu_end.info10 = __cpu_to_le32(info);
}
static u8 *ath12k_hw_qcn9274_compact_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
{
return &desc->u.qcn9274_compact.msdu_payload[0];
}
static u32 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_offset(void)
{
return offsetof(struct hal_rx_desc_qcn9274_compact, mpdu_start);
}
static u32 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_end_offset(void)
{
return offsetof(struct hal_rx_desc_qcn9274_compact, msdu_end);
}
static bool ath12k_hw_qcn9274_compact_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
{
return __le32_to_cpu(desc->u.qcn9274_compact.mpdu_start.info4) &
RX_MPDU_START_INFO4_MAC_ADDR2_VALID;
}
static u8 *ath12k_hw_qcn9274_compact_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
{
return desc->u.qcn9274_compact.mpdu_start.addr2;
}
static bool ath12k_hw_qcn9274_compact_rx_desc_is_da_mcbc(struct hal_rx_desc *desc)
{
return __le32_to_cpu(desc->u.qcn9274_compact.mpdu_start.info6) &
RX_MPDU_START_INFO6_MCAST_BCAST;
}
static void ath12k_hw_qcn9274_compact_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc,
struct ieee80211_hdr *hdr)
{
hdr->frame_control = desc->u.qcn9274_compact.mpdu_start.frame_ctrl;
hdr->duration_id = desc->u.qcn9274_compact.mpdu_start.duration;
ether_addr_copy(hdr->addr1, desc->u.qcn9274_compact.mpdu_start.addr1);
ether_addr_copy(hdr->addr2, desc->u.qcn9274_compact.mpdu_start.addr2);
ether_addr_copy(hdr->addr3, desc->u.qcn9274_compact.mpdu_start.addr3);
if (__le32_to_cpu(desc->u.qcn9274_compact.mpdu_start.info4) &
RX_MPDU_START_INFO4_MAC_ADDR4_VALID) {
ether_addr_copy(hdr->addr4, desc->u.qcn9274_compact.mpdu_start.addr4);
}
hdr->seq_ctrl = desc->u.qcn9274_compact.mpdu_start.seq_ctrl;
}
static void
ath12k_hw_qcn9274_compact_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc,
u8 *crypto_hdr,
enum hal_encrypt_type enctype)
{
unsigned int key_id;
switch (enctype) {
case HAL_ENCRYPT_TYPE_OPEN:
return;
case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
case HAL_ENCRYPT_TYPE_TKIP_MIC:
crypto_hdr[0] =
HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[0]);
crypto_hdr[1] = 0;
crypto_hdr[2] =
HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274_compact.mpdu_start.pn[0]);
break;
case HAL_ENCRYPT_TYPE_CCMP_128:
case HAL_ENCRYPT_TYPE_CCMP_256:
case HAL_ENCRYPT_TYPE_GCMP_128:
case HAL_ENCRYPT_TYPE_AES_GCMP_256:
crypto_hdr[0] =
HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274_compact.mpdu_start.pn[0]);
crypto_hdr[1] =
HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[0]);
crypto_hdr[2] = 0;
break;
case HAL_ENCRYPT_TYPE_WEP_40:
case HAL_ENCRYPT_TYPE_WEP_104:
case HAL_ENCRYPT_TYPE_WEP_128:
case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
case HAL_ENCRYPT_TYPE_WAPI:
return;
}
key_id = le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info5,
RX_MPDU_START_INFO5_KEY_ID);
crypto_hdr[3] = 0x20 | (key_id << 6);
crypto_hdr[4] =
HAL_RX_MPDU_INFO_PN_GET_BYTE3(desc->u.qcn9274_compact.mpdu_start.pn[0]);
crypto_hdr[5] =
HAL_RX_MPDU_INFO_PN_GET_BYTE4(desc->u.qcn9274_compact.mpdu_start.pn[0]);
crypto_hdr[6] =
HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274_compact.mpdu_start.pn[1]);
crypto_hdr[7] =
HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[1]);
}
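/* Editor's note (illustrative, not part of the driver): assuming the
 * HAL_RX_MPDU_INFO_PN_GET_BYTE1..4 helpers return the packet number bytes in
 * ascending order, the CCMP/GCMP cases above produce the standard IEEE 802.11
 * extended-IV header layout:
 *
 *   crypto_hdr[0] = PN0, crypto_hdr[1] = PN1, crypto_hdr[2] = reserved (0),
 *   crypto_hdr[3] = ExtIV (0x20) | key_id << 6,
 *   crypto_hdr[4] = PN2, crypto_hdr[5] = PN3,
 *   crypto_hdr[6] = PN4, crypto_hdr[7] = PN5
 */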
static u16 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_frame_ctl(struct hal_rx_desc *desc)
{
return __le16_to_cpu(desc->u.qcn9274_compact.mpdu_start.frame_ctrl);
}
static bool ath12k_hw_qcn9274_compact_dp_rx_h_msdu_done(struct hal_rx_desc *desc)
{
return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info14,
RX_MSDU_END_INFO14_MSDU_DONE);
}
static bool ath12k_hw_qcn9274_compact_dp_rx_h_l4_cksum_fail(struct hal_rx_desc *desc)
{
return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info13,
RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL);
}
static bool ath12k_hw_qcn9274_compact_dp_rx_h_ip_cksum_fail(struct hal_rx_desc *desc)
{
return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info13,
RX_MSDU_END_INFO13_IP_CKSUM_FAIL);
}
static bool ath12k_hw_qcn9274_compact_dp_rx_h_is_decrypted(struct hal_rx_desc *desc)
{
return (le32_get_bits(desc->u.qcn9274_compact.msdu_end.info14,
RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE) ==
RX_DESC_DECRYPT_STATUS_CODE_OK);
}
static u32 ath12k_hw_qcn9274_compact_dp_rx_h_mpdu_err(struct hal_rx_desc *desc)
{
u32 info = __le32_to_cpu(desc->u.qcn9274_compact.msdu_end.info13);
u32 errmap = 0;
if (info & RX_MSDU_END_INFO13_FCS_ERR)
errmap |= HAL_RX_MPDU_ERR_FCS;
if (info & RX_MSDU_END_INFO13_DECRYPT_ERR)
errmap |= HAL_RX_MPDU_ERR_DECRYPT;
if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR)
errmap |= HAL_RX_MPDU_ERR_TKIP_MIC;
if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR)
errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR;
if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR)
errmap |= HAL_RX_MPDU_ERR_OVERFLOW;
if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR)
errmap |= HAL_RX_MPDU_ERR_MSDU_LEN;
if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR)
errmap |= HAL_RX_MPDU_ERR_MPDU_LEN;
return errmap;
}
static u32 ath12k_hw_qcn9274_compact_get_rx_desc_size(void)
{
return sizeof(struct hal_rx_desc_qcn9274_compact);
}
static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_src_link(struct hal_rx_desc *desc)
{
return le64_get_bits(desc->u.qcn9274_compact.msdu_end.msdu_end_tag,
RX_MSDU_END_64_TLV_SRC_LINK_ID);
}
const struct hal_rx_ops hal_rx_qcn9274_compact_ops = {
.rx_desc_get_first_msdu = ath12k_hw_qcn9274_compact_rx_desc_get_first_msdu,
.rx_desc_get_last_msdu = ath12k_hw_qcn9274_compact_rx_desc_get_last_msdu,
.rx_desc_get_l3_pad_bytes = ath12k_hw_qcn9274_compact_rx_desc_get_l3_pad_bytes,
.rx_desc_encrypt_valid = ath12k_hw_qcn9274_compact_rx_desc_encrypt_valid,
.rx_desc_get_encrypt_type = ath12k_hw_qcn9274_compact_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath12k_hw_qcn9274_compact_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath12k_hw_qcn9274_compact_rx_desc_get_mesh_ctl,
.rx_desc_get_mpdu_seq_ctl_vld =
ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no =
ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_seq_no,
.rx_desc_get_msdu_len = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_len,
.rx_desc_get_msdu_sgi = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_sgi,
.rx_desc_get_msdu_rate_mcs = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rate_mcs,
.rx_desc_get_msdu_rx_bw = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rx_bw,
.rx_desc_get_msdu_freq = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_freq,
.rx_desc_get_msdu_pkt_type = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_pkt_type,
.rx_desc_get_msdu_nss = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_nss,
.rx_desc_get_mpdu_tid = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_tid,
.rx_desc_get_mpdu_peer_id = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_peer_id,
.rx_desc_copy_end_tlv = ath12k_hw_qcn9274_compact_rx_desc_copy_end_tlv,
.rx_desc_get_mpdu_ppdu_id = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_ppdu_id,
.rx_desc_set_msdu_len = ath12k_hw_qcn9274_compact_rx_desc_set_msdu_len,
.rx_desc_get_msdu_payload = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_payload,
.rx_desc_get_mpdu_start_offset =
ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_offset,
.rx_desc_get_msdu_end_offset =
ath12k_hw_qcn9274_compact_rx_desc_get_msdu_end_offset,
.rx_desc_mac_addr2_valid = ath12k_hw_qcn9274_compact_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath12k_hw_qcn9274_compact_rx_desc_mpdu_start_addr2,
.rx_desc_is_da_mcbc = ath12k_hw_qcn9274_compact_rx_desc_is_da_mcbc,
.rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_compact_rx_desc_get_dot11_hdr,
.rx_desc_get_crypto_header = ath12k_hw_qcn9274_compact_rx_desc_get_crypto_hdr,
.rx_desc_get_mpdu_frame_ctl =
ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_frame_ctl,
.dp_rx_h_msdu_done = ath12k_hw_qcn9274_compact_dp_rx_h_msdu_done,
.dp_rx_h_l4_cksum_fail = ath12k_hw_qcn9274_compact_dp_rx_h_l4_cksum_fail,
.dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_compact_dp_rx_h_ip_cksum_fail,
.dp_rx_h_is_decrypted = ath12k_hw_qcn9274_compact_dp_rx_h_is_decrypted,
.dp_rx_h_mpdu_err = ath12k_hw_qcn9274_compact_dp_rx_h_mpdu_err,
.rx_desc_get_desc_size = ath12k_hw_qcn9274_compact_get_rx_desc_size,
.rx_desc_get_msdu_src_link_id =
ath12k_hw_qcn9274_compact_rx_desc_get_msdu_src_link,
};
const struct hal_ops hal_qcn9274_ops = {
.create_srng_config = ath12k_hal_srng_create_config_qcn9274,
.tcl_to_wbm_rbm_map = ath12k_hal_qcn9274_tcl_to_wbm_rbm_map,
.rxdma_ring_wmask_rx_mpdu_start = ath12k_hal_qcn9274_rx_mpdu_start_wmask_get,
.rxdma_ring_wmask_rx_msdu_end = ath12k_hal_qcn9274_rx_msdu_end_wmask_get,
.get_hal_rx_compact_ops = ath12k_hal_qcn9274_get_hal_rx_compact_ops,
};
static bool ath12k_hw_wcn7850_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
@ -1134,7 +1513,17 @@ static u32 ath12k_hw_wcn7850_dp_rx_h_mpdu_err(struct hal_rx_desc *desc)
return errmap;
}
const struct hal_ops hal_wcn7850_ops = {
static u32 ath12k_hw_wcn7850_get_rx_desc_size(void)
{
return sizeof(struct hal_rx_desc_wcn7850);
}
static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_src_link(struct hal_rx_desc *desc)
{
return 0;
}
const struct hal_rx_ops hal_rx_wcn7850_ops = {
.rx_desc_get_first_msdu = ath12k_hw_wcn7850_rx_desc_get_first_msdu,
.rx_desc_get_last_msdu = ath12k_hw_wcn7850_rx_desc_get_last_msdu,
.rx_desc_get_l3_pad_bytes = ath12k_hw_wcn7850_rx_desc_get_l3_pad_bytes,
@ -1167,13 +1556,21 @@ const struct hal_ops hal_wcn7850_ops = {
.rx_desc_get_dot11_hdr = ath12k_hw_wcn7850_rx_desc_get_dot11_hdr,
.rx_desc_get_crypto_header = ath12k_hw_wcn7850_rx_desc_get_crypto_hdr,
.rx_desc_get_mpdu_frame_ctl = ath12k_hw_wcn7850_rx_desc_get_mpdu_frame_ctl,
.create_srng_config = ath12k_hal_srng_create_config_wcn7850,
.tcl_to_wbm_rbm_map = ath12k_hal_wcn7850_tcl_to_wbm_rbm_map,
.dp_rx_h_msdu_done = ath12k_hw_wcn7850_dp_rx_h_msdu_done,
.dp_rx_h_l4_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_l4_cksum_fail,
.dp_rx_h_ip_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_ip_cksum_fail,
.dp_rx_h_is_decrypted = ath12k_hw_wcn7850_dp_rx_h_is_decrypted,
.dp_rx_h_mpdu_err = ath12k_hw_wcn7850_dp_rx_h_mpdu_err,
.rx_desc_get_desc_size = ath12k_hw_wcn7850_get_rx_desc_size,
.rx_desc_get_msdu_src_link_id = ath12k_hw_wcn7850_rx_desc_get_msdu_src_link,
};
const struct hal_ops hal_wcn7850_ops = {
.create_srng_config = ath12k_hal_srng_create_config_wcn7850,
.tcl_to_wbm_rbm_map = ath12k_hal_wcn7850_tcl_to_wbm_rbm_map,
.rxdma_ring_wmask_rx_mpdu_start = NULL,
.rxdma_ring_wmask_rx_msdu_end = NULL,
.get_hal_rx_compact_ops = NULL,
};
static int ath12k_hal_alloc_cont_rdp(struct ath12k_base *ab)


@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_HAL_H
@ -1023,6 +1023,8 @@ struct ath12k_hal {
/* shadow register configuration */
u32 shadow_reg_addr[HAL_SHADOW_NUM_REGS];
int num_shadow_reg_configured;
u32 hal_desc_sz;
};
/* Maps WBM ring number and Return Buffer Manager Id per TCL ring */
@ -1031,7 +1033,7 @@ struct ath12k_hal_tcl_to_wbm_rbm_map {
u8 rbm_id;
};
struct hal_ops {
struct hal_rx_ops {
bool (*rx_desc_get_first_msdu)(struct hal_rx_desc *desc);
bool (*rx_desc_get_last_msdu)(struct hal_rx_desc *desc);
u8 (*rx_desc_get_l3_pad_bytes)(struct hal_rx_desc *desc);
@ -1070,18 +1072,30 @@ struct hal_ops {
void (*rx_desc_get_crypto_header)(struct hal_rx_desc *desc,
u8 *crypto_hdr,
enum hal_encrypt_type enctype);
int (*create_srng_config)(struct ath12k_base *ab);
bool (*dp_rx_h_msdu_done)(struct hal_rx_desc *desc);
bool (*dp_rx_h_l4_cksum_fail)(struct hal_rx_desc *desc);
bool (*dp_rx_h_ip_cksum_fail)(struct hal_rx_desc *desc);
bool (*dp_rx_h_is_decrypted)(struct hal_rx_desc *desc);
u32 (*dp_rx_h_mpdu_err)(struct hal_rx_desc *desc);
u32 (*rx_desc_get_desc_size)(void);
u8 (*rx_desc_get_msdu_src_link_id)(struct hal_rx_desc *desc);
};
struct hal_ops {
int (*create_srng_config)(struct ath12k_base *ab);
u16 (*rxdma_ring_wmask_rx_mpdu_start)(void);
u32 (*rxdma_ring_wmask_rx_msdu_end)(void);
const struct hal_rx_ops *(*get_hal_rx_compact_ops)(void);
const struct ath12k_hal_tcl_to_wbm_rbm_map *tcl_to_wbm_rbm_map;
};
extern const struct hal_ops hal_qcn9274_ops;
extern const struct hal_ops hal_wcn7850_ops;
extern const struct hal_rx_ops hal_rx_qcn9274_ops;
extern const struct hal_rx_ops hal_rx_qcn9274_compact_ops;
extern const struct hal_rx_ops hal_rx_wcn7850_ops;
u32 ath12k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid);
void ath12k_hal_reo_qdesc_setup(struct hal_rx_reo_queue *qdesc,
int tid, u32 ba_window_size,


@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/types.h>
@ -897,7 +897,6 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.reoq_lut_support = false,
.supports_shadow_regs = false,
.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9274),
.num_tcl_banks = 48,
.max_tx_ring = 4,
@ -917,6 +916,10 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.def_num_link = 0,
.max_mlo_peer = 256,
.otp_board_id_register = QCN9274_QFPROM_RAW_RFA_PDET_ROW13_LSB,
.supports_sta_ps = false,
},
{
.name = "wcn7850 hw2.0",
@ -953,7 +956,10 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.vdev_start_delay = true,
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP),
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_DEVICE) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO),
.supports_monitor = false,
.idle_ps = true,
@ -963,7 +969,6 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.reoq_lut_support = false,
.supports_shadow_regs = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_wcn7850),
.num_tcl_banks = 7,
.max_tx_ring = 3,
@ -984,6 +989,10 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.def_num_link = 2,
.max_mlo_peer = 32,
.otp_board_id_register = 0,
.supports_sta_ps = true,
},
{
.name = "qcn9274 hw2.0",
@ -993,7 +1002,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.board_size = 256 * 1024,
.cal_offset = 128 * 1024,
},
.max_radios = 1,
.max_radios = 2,
.single_pdev_only = false,
.qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9274,
.internal_sleep_clock = false,
@ -1029,7 +1038,6 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.reoq_lut_support = false,
.supports_shadow_regs = false,
.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9274),
.num_tcl_banks = 48,
.max_tx_ring = 4,
@ -1049,6 +1057,10 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.def_num_link = 0,
.max_mlo_peer = 256,
.otp_board_id_register = QCN9274_QFPROM_RAW_RFA_PDET_ROW13_LSB,
.supports_sta_ps = false,
},
};


@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_HW_H
@ -17,19 +17,30 @@
/* Num VDEVS per radio */
#define TARGET_NUM_VDEVS (16 + 1)
#define TARGET_NUM_PEERS_PDEV (512 + TARGET_NUM_VDEVS)
#define TARGET_NUM_PEERS_PDEV_SINGLE (TARGET_NUM_STATIONS_SINGLE + \
TARGET_NUM_VDEVS)
#define TARGET_NUM_PEERS_PDEV_DBS (TARGET_NUM_STATIONS_DBS + \
TARGET_NUM_VDEVS)
#define TARGET_NUM_PEERS_PDEV_DBS_SBS (TARGET_NUM_STATIONS_DBS_SBS + \
TARGET_NUM_VDEVS)
/* Num of peers for Single Radio mode */
#define TARGET_NUM_PEERS_SINGLE (TARGET_NUM_PEERS_PDEV)
#define TARGET_NUM_PEERS_SINGLE (TARGET_NUM_PEERS_PDEV_SINGLE)
/* Num of peers for DBS */
#define TARGET_NUM_PEERS_DBS (2 * TARGET_NUM_PEERS_PDEV)
#define TARGET_NUM_PEERS_DBS (2 * TARGET_NUM_PEERS_PDEV_DBS)
/* Num of peers for DBS_SBS */
#define TARGET_NUM_PEERS_DBS_SBS (3 * TARGET_NUM_PEERS_PDEV)
#define TARGET_NUM_PEERS_DBS_SBS (3 * TARGET_NUM_PEERS_PDEV_DBS_SBS)
/* Max num of stations (per radio) */
#define TARGET_NUM_STATIONS 512
/* Max num of stations for Single Radio mode */
#define TARGET_NUM_STATIONS_SINGLE 512
/* Max num of stations for DBS */
#define TARGET_NUM_STATIONS_DBS 128
/* Max num of stations for DBS_SBS */
#define TARGET_NUM_STATIONS_DBS_SBS 128
#define TARGET_NUM_PEERS(x) TARGET_NUM_PEERS_##x
#define TARGET_NUM_PEER_KEYS 2
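/* Editor's sketch (worked arithmetic derived from the macros above, using
 * TARGET_NUM_VDEVS = 16 + 1 = 17):
 *
 *   TARGET_NUM_PEERS_PDEV_SINGLE = 512 + 17 = 529
 *   TARGET_NUM_PEERS_PDEV_DBS    = 128 + 17 = 145
 *   TARGET_NUM_PEERS(SINGLE)     = 529
 *   TARGET_NUM_PEERS(DBS)        = 2 * 145 = 290
 *   TARGET_NUM_PEERS(DBS_SBS)    = 3 * 145 = 435
 */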
@ -66,6 +77,8 @@
#define TARGET_NUM_WDS_ENTRIES 32
#define TARGET_DMA_BURST_SIZE 1
#define TARGET_RX_BATCHMODE 1
#define TARGET_RX_PEER_METADATA_VER_V1A 2
#define TARGET_RX_PEER_METADATA_VER_V1B 3
#define ATH12K_HW_MAX_QUEUES 4
#define ATH12K_QUEUE_LEN 4096
@ -174,7 +187,6 @@ struct ath12k_hw_params {
bool reoq_lut_support:1;
bool supports_shadow_regs:1;
u32 hal_desc_sz;
u32 num_tcl_banks;
u32 max_tx_ring;
@ -195,6 +207,10 @@ struct ath12k_hw_params {
u8 def_num_link;
u16 max_mlo_peer;
u32 otp_board_id_register;
bool supports_sta_ps;
};
struct ath12k_hw_ops {


@ -563,7 +563,8 @@ struct ath12k_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab,
for (i = 0; i < ab->num_radios; i++) {
pdev = rcu_dereference(ab->pdevs_active[i]);
if (pdev && pdev->ar) {
if (pdev && pdev->ar &&
(pdev->ar->allocated_vdev_map & (1LL << vdev_id))) {
arvif = ath12k_mac_get_arvif(pdev->ar, vdev_id);
if (arvif)
return arvif;
@ -1083,6 +1084,46 @@ static int ath12k_mac_monitor_stop(struct ath12k *ar)
return ret;
}
static int ath12k_mac_vdev_stop(struct ath12k_vif *arvif)
{
struct ath12k *ar = arvif->ar;
int ret;
lockdep_assert_held(&ar->conf_mutex);
reinit_completion(&ar->vdev_setup_done);
ret = ath12k_wmi_vdev_stop(ar, arvif->vdev_id);
if (ret) {
ath12k_warn(ar->ab, "failed to stop WMI vdev %i: %d\n",
arvif->vdev_id, ret);
goto err;
}
ret = ath12k_mac_vdev_setup_sync(ar);
if (ret) {
ath12k_warn(ar->ab, "failed to synchronize setup for vdev %i: %d\n",
arvif->vdev_id, ret);
goto err;
}
WARN_ON(ar->num_started_vdevs == 0);
ar->num_started_vdevs--;
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n",
arvif->vif->addr, arvif->vdev_id);
if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "CAC Stopped for vdev %d\n",
arvif->vdev_id);
}
return 0;
err:
return ret;
}
static int ath12k_mac_config(struct ath12k *ar, u32 changed)
{
struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
@ -1138,6 +1179,63 @@ static int ath12k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
return ret;
}
static int ath12k_mac_setup_bcn_p2p_ie(struct ath12k_vif *arvif,
struct sk_buff *bcn)
{
struct ath12k *ar = arvif->ar;
struct ieee80211_mgmt *mgmt;
const u8 *p2p_ie;
int ret;
mgmt = (void *)bcn->data;
p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
mgmt->u.beacon.variable,
bcn->len - (mgmt->u.beacon.variable -
bcn->data));
if (!p2p_ie) {
ath12k_warn(ar->ab, "no P2P ie found in beacon\n");
return -ENOENT;
}
ret = ath12k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
if (ret) {
ath12k_warn(ar->ab, "failed to submit P2P GO bcn ie for vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
return 0;
}
static int ath12k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
u8 oui_type, size_t ie_offset)
{
const u8 *next, *end;
size_t len;
u8 *ie;
if (WARN_ON(skb->len < ie_offset))
return -EINVAL;
ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
skb->data + ie_offset,
skb->len - ie_offset);
if (!ie)
return -ENOENT;
len = ie[1] + 2;
end = skb->data + skb->len;
next = ie + len;
if (WARN_ON(next > end))
return -EINVAL;
memmove(ie, next, end - next);
skb_trim(skb, skb->len - len);
return 0;
}
static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif)
{
struct ath12k *ar = arvif->ar;
@ -1170,14 +1268,37 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif)
ies, (skb_tail_pointer(bcn) - ies)))
arvif->wpaie_present = true;
ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn);
if (arvif->vif->type == NL80211_IFTYPE_AP && arvif->vif->p2p) {
ret = ath12k_mac_setup_bcn_p2p_ie(arvif, bcn);
if (ret) {
ath12k_warn(ab, "failed to setup P2P GO bcn ie: %d\n",
ret);
goto free_bcn_skb;
}
kfree_skb(bcn);
/* P2P IE is inserted by firmware automatically (as
* configured above) so remove it from the base beacon
* template to avoid duplicate P2P IEs in beacon frames.
*/
ret = ath12k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA,
WLAN_OUI_TYPE_WFA_P2P,
offsetof(struct ieee80211_mgmt,
u.beacon.variable));
if (ret) {
ath12k_warn(ab, "failed to remove P2P vendor ie: %d\n",
ret);
goto free_bcn_skb;
}
}
ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn);
if (ret)
ath12k_warn(ab, "failed to submit beacon template command: %d\n",
ret);
free_bcn_skb:
kfree_skb(bcn);
return ret;
}
@ -2509,12 +2630,60 @@ static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif,
return ret;
}
static void ath12k_mac_vif_setup_ps(struct ath12k_vif *arvif)
{
struct ath12k *ar = arvif->ar;
struct ieee80211_vif *vif = arvif->vif;
struct ieee80211_conf *conf = &ath12k_ar_to_hw(ar)->conf;
enum wmi_sta_powersave_param param;
enum wmi_sta_ps_mode psmode;
int ret;
int timeout;
bool enable_ps;
lockdep_assert_held(&ar->conf_mutex);
if (vif->type != NL80211_IFTYPE_STATION)
return;
enable_ps = arvif->ps;
if (enable_ps) {
psmode = WMI_STA_PS_MODE_ENABLED;
param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
timeout = conf->dynamic_ps_timeout;
if (timeout == 0) {
/* firmware doesn't like 0 */
timeout = ieee80211_tu_to_usec(vif->bss_conf.beacon_int) / 1000;
}
ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
timeout);
if (ret) {
ath12k_warn(ar->ab, "failed to set inactivity time for vdev %d: %i\n",
arvif->vdev_id, ret);
return;
}
} else {
psmode = WMI_STA_PS_MODE_DISABLED;
}
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d psmode %s\n",
arvif->vdev_id, psmode ? "enable" : "disable");
ret = ath12k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, psmode);
if (ret)
ath12k_warn(ar->ab, "failed to set sta power save mode %d for vdev %d: %d\n",
psmode, arvif->vdev_id, ret);
}
static void ath12k_mac_bss_info_changed(struct ath12k *ar,
struct ath12k_vif *arvif,
struct ieee80211_bss_conf *info,
u64 changed)
{
struct ieee80211_vif *vif = arvif->vif;
struct ieee80211_vif_cfg *vif_cfg = &vif->cfg;
struct cfg80211_chan_def def;
u32 param_id, param_value;
enum nl80211_band band;
@ -2784,6 +2953,12 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
}
ath12k_mac_fils_discovery(arvif, info);
if (changed & BSS_CHANGED_PS &&
ar->ab->hw_params->supports_sta_ps) {
arvif->ps = vif_cfg->ps;
ath12k_mac_vif_setup_ps(arvif);
}
}
static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
@ -3023,7 +3198,7 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
for (i = 0; i < arg.num_ssids; i++)
arg.ssid[i] = req->ssids[i];
} else {
arg.scan_flags |= WMI_SCAN_FLAG_PASSIVE;
arg.scan_f_passive = 1;
}
if (req->n_channels) {
@ -3821,6 +3996,13 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
sta->addr, arvif->vdev_id);
} else if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)) {
if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
ath12k_bss_disassoc(ar, arvif);
ret = ath12k_mac_vdev_stop(arvif);
if (ret)
ath12k_warn(ar->ab, "failed to stop vdev %i: %d\n",
arvif->vdev_id, ret);
}
ath12k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
ret = ath12k_peer_delete(ar, arvif->vdev_id, sta->addr);
@ -4985,8 +5167,8 @@ static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work)
}
arvif = ath12k_vif_to_arvif(skb_cb->vif);
if (ar->allocated_vdev_map & (1LL << arvif->vdev_id) &&
arvif->is_started) {
if (ar->allocated_vdev_map & (1LL << arvif->vdev_id)) {
ret = ath12k_mac_mgmt_tx_wmi(ar, arvif, skb);
if (ret) {
ath12k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n",
@ -5035,6 +5217,27 @@ static int ath12k_mac_mgmt_tx(struct ath12k *ar, struct sk_buff *skb,
return 0;
}
static void ath12k_mac_add_p2p_noa_ie(struct ath12k *ar,
struct ieee80211_vif *vif,
struct sk_buff *skb,
bool is_prb_rsp)
{
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
if (likely(!is_prb_rsp))
return;
spin_lock_bh(&ar->data_lock);
if (arvif->u.ap.noa_data &&
!pskb_expand_head(skb, 0, arvif->u.ap.noa_len,
GFP_ATOMIC))
skb_put_data(skb, arvif->u.ap.noa_data,
arvif->u.ap.noa_len);
spin_unlock_bh(&ar->data_lock);
}
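/* Editor's note (inference, not from the patch itself): pskb_expand_head() is
 * used only to guarantee enough tailroom before skb_put_data() appends the
 * cached NoA attribute to the outgoing probe response; the append happens
 * under ar->data_lock because noa_data is replaced asynchronously, presumably
 * from the WMI P2P NoA event path.
 */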
static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
@ -5058,10 +5261,11 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
skb_cb->flags |= ATH12K_SKB_CIPHER_SET;
}
is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
skb_cb->flags |= ATH12K_SKB_HW_80211_ENCAP;
} else if (ieee80211_is_mgmt(hdr->frame_control)) {
is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
ret = ath12k_mac_mgmt_tx(ar, skb, is_prb_rsp);
if (ret) {
ath12k_warn(ar->ab, "failed to queue management frame %d\n",
@ -5071,6 +5275,10 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
return;
}
/* This case applies only to P2P_GO */
if (vif->type == NL80211_IFTYPE_AP && vif->p2p)
ath12k_mac_add_p2p_noa_ie(ar, vif, skb, is_prb_rsp);
ret = ath12k_dp_tx(ar, arvif, skb);
if (ret) {
ath12k_warn(ar->ab, "failed to transmit frame %d\n", ret);
@ -5363,7 +5571,7 @@ ath12k_mac_get_vdev_stats_id(struct ath12k_vif *arvif)
do {
if (ab->free_vdev_stats_id_map & (1LL << vdev_stats_id)) {
vdev_stats_id++;
if (vdev_stats_id <= ATH12K_INVAL_VDEV_STATS_ID) {
if (vdev_stats_id >= ATH12K_MAX_VDEV_STATS_ID) {
vdev_stats_id = ATH12K_INVAL_VDEV_STATS_ID;
break;
}
@ -5588,17 +5796,29 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
case NL80211_IFTYPE_UNSPECIFIED:
case NL80211_IFTYPE_STATION:
arvif->vdev_type = WMI_VDEV_TYPE_STA;
if (vif->p2p)
arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT;
break;
case NL80211_IFTYPE_MESH_POINT:
arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S;
fallthrough;
case NL80211_IFTYPE_AP:
arvif->vdev_type = WMI_VDEV_TYPE_AP;
if (vif->p2p)
arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO;
break;
case NL80211_IFTYPE_MONITOR:
arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
ar->monitor_vdev_id = bit;
break;
case NL80211_IFTYPE_P2P_DEVICE:
arvif->vdev_type = WMI_VDEV_TYPE_STA;
arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
break;
default:
WARN_ON(1);
break;
@ -6150,6 +6370,11 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif,
arg.pref_tx_streams = ar->num_tx_chains;
arg.pref_rx_streams = ar->num_rx_chains;
/* Fill the MBSSID flags to indicate AP is non MBSSID by default
* Corresponding flags would be updated with MBSSID support.
*/
arg.mbssid_flags = WMI_VDEV_MBSSID_FLAGS_NON_MBSSID_AP;
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
arg.ssid = arvif->u.ap.ssid;
arg.ssid_len = arvif->u.ap.ssid_len;
@ -6226,46 +6451,6 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif,
return 0;
}
static int ath12k_mac_vdev_stop(struct ath12k_vif *arvif)
{
struct ath12k *ar = arvif->ar;
int ret;
lockdep_assert_held(&ar->conf_mutex);
reinit_completion(&ar->vdev_setup_done);
ret = ath12k_wmi_vdev_stop(ar, arvif->vdev_id);
if (ret) {
ath12k_warn(ar->ab, "failed to stop WMI vdev %i: %d\n",
arvif->vdev_id, ret);
goto err;
}
ret = ath12k_mac_vdev_setup_sync(ar);
if (ret) {
ath12k_warn(ar->ab, "failed to synchronize setup for vdev %i: %d\n",
arvif->vdev_id, ret);
goto err;
}
WARN_ON(ar->num_started_vdevs == 0);
ar->num_started_vdevs--;
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n",
arvif->vif->addr, arvif->vdev_id);
if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "CAC Stopped for vdev %d\n",
arvif->vdev_id);
}
return 0;
err:
return ret;
}
static int ath12k_mac_vdev_start(struct ath12k_vif *arvif,
struct ieee80211_chanctx_conf *ctx)
{
@ -6635,11 +6820,13 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
arvif->is_started = false;
}
ret = ath12k_mac_vdev_stop(arvif);
if (ret)
ath12k_warn(ab, "failed to stop vdev %i: %d\n",
arvif->vdev_id, ret);
if (arvif->vdev_type != WMI_VDEV_TYPE_STA) {
ath12k_bss_disassoc(ar, arvif);
ret = ath12k_mac_vdev_stop(arvif);
if (ret)
ath12k_warn(ab, "failed to stop vdev %i: %d\n",
arvif->vdev_id, ret);
}
arvif->is_started = false;
if (ab->hw_params->vdev_start_delay &&
@ -7285,6 +7472,125 @@ static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
}
static int ath12k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
ar = ath12k_ah_to_ar(ah);
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
ar->scan.roc_notify = false;
spin_unlock_bh(&ar->data_lock);
ath12k_scan_abort(ar);
mutex_unlock(&ar->conf_mutex);
cancel_delayed_work_sync(&ar->scan.timeout);
return 0;
}
static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel *chan,
int duration,
enum ieee80211_roc_type type)
{
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k_wmi_scan_req_arg arg;
struct ath12k *ar;
u32 scan_time_msec;
int ret;
ar = ath12k_ah_to_ar(ah);
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
switch (ar->scan.state) {
case ATH12K_SCAN_IDLE:
reinit_completion(&ar->scan.started);
reinit_completion(&ar->scan.completed);
reinit_completion(&ar->scan.on_channel);
ar->scan.state = ATH12K_SCAN_STARTING;
ar->scan.is_roc = true;
ar->scan.vdev_id = arvif->vdev_id;
ar->scan.roc_freq = chan->center_freq;
ar->scan.roc_notify = true;
ret = 0;
break;
case ATH12K_SCAN_STARTING:
case ATH12K_SCAN_RUNNING:
case ATH12K_SCAN_ABORTING:
ret = -EBUSY;
break;
}
spin_unlock_bh(&ar->data_lock);
if (ret)
goto exit;
scan_time_msec = hw->wiphy->max_remain_on_channel_duration * 2;
memset(&arg, 0, sizeof(arg));
ath12k_wmi_start_scan_init(ar, &arg);
arg.num_chan = 1;
arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list),
GFP_KERNEL);
if (!arg.chan_list) {
ret = -ENOMEM;
goto exit;
}
arg.vdev_id = arvif->vdev_id;
arg.scan_id = ATH12K_SCAN_ID;
arg.chan_list[0] = chan->center_freq;
arg.dwell_time_active = scan_time_msec;
arg.dwell_time_passive = scan_time_msec;
arg.max_scan_time = scan_time_msec;
arg.scan_f_passive = 1;
arg.burst_duration = duration;
ret = ath12k_start_scan(ar, &arg);
if (ret) {
ath12k_warn(ar->ab, "failed to start roc scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
ar->scan.state = ATH12K_SCAN_IDLE;
spin_unlock_bh(&ar->data_lock);
goto free_chan_list;
}
ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
if (ret == 0) {
ath12k_warn(ar->ab, "failed to switch to channel for roc scan\n");
ret = ath12k_scan_stop(ar);
if (ret)
ath12k_warn(ar->ab, "failed to stop scan: %d\n", ret);
ret = -ETIMEDOUT;
goto free_chan_list;
}
ieee80211_queue_delayed_work(hw, &ar->scan.timeout,
msecs_to_jiffies(duration));
ret = 0;
free_chan_list:
kfree(arg.chan_list);
exit:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct ieee80211_ops ath12k_ops = {
.tx = ath12k_mac_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
@ -7319,6 +7625,8 @@ static const struct ieee80211_ops ath12k_ops = {
.get_survey = ath12k_mac_op_get_survey,
.flush = ath12k_mac_op_flush,
.sta_statistics = ath12k_mac_op_sta_statistics,
.remain_on_channel = ath12k_mac_op_remain_on_channel,
.cancel_remain_on_channel = ath12k_mac_op_cancel_remain_on_channel,
};
static void ath12k_mac_update_ch_list(struct ath12k *ar,
@ -7479,9 +7787,10 @@ static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah)
struct ieee80211_iface_combination *combinations;
struct ieee80211_iface_limit *limits;
int n_limits, max_interfaces;
bool ap, mesh;
bool ap, mesh, p2p;
ap = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_AP);
p2p = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_P2P_DEVICE);
mesh = IS_ENABLED(CONFIG_MAC80211_MESH) &&
ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_MESH_POINT);
@ -7490,9 +7799,15 @@ static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah)
if (!combinations)
return -ENOMEM;
if (ap || mesh) {
if ((ap || mesh) && !p2p) {
n_limits = 2;
max_interfaces = 16;
} else if (p2p) {
n_limits = 3;
if (ap || mesh)
max_interfaces = 16;
else
max_interfaces = 3;
} else {
n_limits = 1;
max_interfaces = 1;
@ -7507,14 +7822,22 @@ static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah)
limits[0].max = 1;
limits[0].types |= BIT(NL80211_IFTYPE_STATION);
if (ap) {
if (ap || mesh || p2p)
limits[1].max = max_interfaces;
if (ap)
limits[1].types |= BIT(NL80211_IFTYPE_AP);
}
if (mesh)
limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
if (p2p) {
limits[1].types |= BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO);
limits[2].max = 1;
limits[2].types |= BIT(NL80211_IFTYPE_P2P_DEVICE);
}
combinations[0].limits = limits;
combinations[0].n_limits = n_limits;
combinations[0].max_interfaces = max_interfaces;
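/* Editor's summary (illustrative, not driver code): with P2P enabled the
 * single registered combination allows one station interface (limits[0]), up
 * to max_interfaces AP/mesh/P2P-client/P2P-GO interfaces (limits[1]) and one
 * P2P device interface (limits[2]), capped at max_interfaces in total.
 */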
@ -7619,8 +7942,8 @@ static int ath12k_mac_setup_register(struct ath12k *ar,
ath12k_mac_setup_ht_vht_cap(ar, cap, ht_cap);
ath12k_mac_setup_sband_iftype_data(ar, cap);
ar->max_num_stations = TARGET_NUM_STATIONS;
ar->max_num_peers = TARGET_NUM_PEERS_PDEV;
ar->max_num_stations = ath12k_core_get_max_station_per_radio(ar->ab);
ar->max_num_peers = ath12k_core_get_max_peers_per_radio(ar->ab);
return 0;
}
@ -7831,6 +8154,7 @@ static void ath12k_mac_setup(struct ath12k *ar)
init_completion(&ar->bss_survey_done);
init_completion(&ar->scan.started);
init_completion(&ar->scan.completed);
init_completion(&ar->scan.on_channel);
INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work);
INIT_WORK(&ar->regd_update_work, ath12k_regd_update_work);
@ -7978,6 +8302,7 @@ int ath12k_mac_allocate(struct ath12k_base *ab)
if (!ah) {
ath12k_warn(ab, "failed to allocate mac80211 hw device for hw_idx %d\n",
i);
ret = -ENOMEM;
goto err;
}


@ -1,11 +1,12 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include "core.h"
#include "debug.h"
@ -13,6 +14,8 @@
#include "pci.h"
#define MHI_TIMEOUT_DEFAULT_MS 90000
#define OTP_INVALID_BOARD_ID 0xFFFF
#define OTP_VALID_DUALMAC_BOARD_ID_MASK 0x1000
static const struct mhi_channel_config ath12k_mhi_channels_qcn9274[] = {
{
@ -358,23 +361,60 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci)
{
struct ath12k_base *ab = ab_pci->ab;
struct mhi_controller *mhi_ctrl;
unsigned int board_id;
int ret;
bool dualmac = false;
mhi_ctrl = mhi_alloc_controller();
if (!mhi_ctrl)
return -ENOMEM;
ath12k_core_create_firmware_path(ab, ATH12K_AMSS_FILE,
ab_pci->amss_path,
sizeof(ab_pci->amss_path));
ab_pci->mhi_ctrl = mhi_ctrl;
mhi_ctrl->cntrl_dev = ab->dev;
mhi_ctrl->fw_image = ab_pci->amss_path;
mhi_ctrl->regs = ab->mem;
mhi_ctrl->reg_len = ab->mem_len;
mhi_ctrl->rddm_size = ab->hw_params->rddm_size;
if (ab->hw_params->otp_board_id_register) {
board_id =
ath12k_pci_read32(ab, ab->hw_params->otp_board_id_register);
board_id = u32_get_bits(board_id, OTP_BOARD_ID_MASK);
if (!board_id || (board_id == OTP_INVALID_BOARD_ID)) {
ath12k_dbg(ab, ATH12K_DBG_BOOT,
"failed to read board id\n");
} else if (board_id & OTP_VALID_DUALMAC_BOARD_ID_MASK) {
dualmac = true;
ab->slo_capable = false;
ath12k_dbg(ab, ATH12K_DBG_BOOT,
"dualmac fw selected for board id: %x\n", board_id);
}
}
if (dualmac) {
if (ab->fw.amss_dualmac_data && ab->fw.amss_dualmac_len > 0) {
/* use MHI firmware file from firmware-N.bin */
mhi_ctrl->fw_data = ab->fw.amss_dualmac_data;
mhi_ctrl->fw_sz = ab->fw.amss_dualmac_len;
} else {
ath12k_warn(ab, "dualmac firmware IE not present in firmware-N.bin\n");
ret = -ENOENT;
goto free_controller;
}
} else {
if (ab->fw.amss_data && ab->fw.amss_len > 0) {
/* use MHI firmware file from firmware-N.bin */
mhi_ctrl->fw_data = ab->fw.amss_data;
mhi_ctrl->fw_sz = ab->fw.amss_len;
} else {
/* use the old separate mhi.bin MHI firmware file */
ath12k_core_create_firmware_path(ab, ATH12K_AMSS_FILE,
ab_pci->amss_path,
sizeof(ab_pci->amss_path));
mhi_ctrl->fw_image = ab_pci->amss_path;
}
}
ret = ath12k_mhi_get_msi(ab_pci);
if (ret) {
ath12k_err(ab, "failed to get msi for mhi\n");


@ -0,0 +1,142 @@
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <net/mac80211.h>
#include "core.h"
#include "mac.h"
#include "p2p.h"
static void ath12k_p2p_noa_ie_fill(u8 *data, size_t len,
const struct ath12k_wmi_p2p_noa_info *noa)
{
struct ieee80211_p2p_noa_attr *noa_attr;
u8 ctwindow = le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_CTWIN_TU);
bool oppps = le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_OPP_PS);
__le16 *noa_attr_len;
u16 attr_len;
u8 noa_descriptors = le32_get_bits(noa->noa_attr,
WMI_P2P_NOA_INFO_DESC_NUM);
int i;
/* P2P IE */
data[0] = WLAN_EID_VENDOR_SPECIFIC;
data[1] = len - 2;
data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
data[5] = WLAN_OUI_TYPE_WFA_P2P;
/* NOA ATTR */
data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
noa_attr->index = le32_get_bits(noa->noa_attr,
WMI_P2P_NOA_INFO_INDEX);
noa_attr->oppps_ctwindow = ctwindow;
if (oppps)
noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
for (i = 0; i < noa_descriptors; i++) {
noa_attr->desc[i].count =
__le32_to_cpu(noa->descriptors[i].type_count);
noa_attr->desc[i].duration = noa->descriptors[i].duration;
noa_attr->desc[i].interval = noa->descriptors[i].interval;
noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
}
attr_len = 2; /* index + oppps_ctwindow */
attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
*noa_attr_len = __cpu_to_le16(attr_len);
}
static size_t ath12k_p2p_noa_ie_len_compute(const struct ath12k_wmi_p2p_noa_info *noa)
{
size_t len = 0;
if (!(le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM)) &&
!(le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_OPP_PS)))
return 0;
len += 1 + 1 + 4; /* EID + len + OUI */
len += 1 + 2; /* noa attr + attr len */
len += 1 + 1; /* index + oppps_ctwindow */
len += le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM) *
sizeof(struct ieee80211_p2p_noa_desc);
return len;
}
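/* Worked example (editor's sketch, assuming struct ieee80211_p2p_noa_desc is
 * the usual packed 13-byte mac80211 layout): a NoA attribute carrying two
 * descriptors yields len = 6 (EID + len + OUI + type) + 3 (attr ID + attr
 * len) + 2 (index + oppps_ctwindow) + 2 * 13 = 37 bytes.
 */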
static void ath12k_p2p_noa_ie_assign(struct ath12k_vif *arvif, void *ie,
size_t len)
{
struct ath12k *ar = arvif->ar;
lockdep_assert_held(&ar->data_lock);
kfree(arvif->u.ap.noa_data);
arvif->u.ap.noa_data = ie;
arvif->u.ap.noa_len = len;
}
static void __ath12k_p2p_noa_update(struct ath12k_vif *arvif,
const struct ath12k_wmi_p2p_noa_info *noa)
{
struct ath12k *ar = arvif->ar;
void *ie;
size_t len;
lockdep_assert_held(&ar->data_lock);
ath12k_p2p_noa_ie_assign(arvif, NULL, 0);
len = ath12k_p2p_noa_ie_len_compute(noa);
if (!len)
return;
ie = kmalloc(len, GFP_ATOMIC);
if (!ie)
return;
ath12k_p2p_noa_ie_fill(ie, len, noa);
ath12k_p2p_noa_ie_assign(arvif, ie, len);
}
void ath12k_p2p_noa_update(struct ath12k_vif *arvif,
const struct ath12k_wmi_p2p_noa_info *noa)
{
struct ath12k *ar = arvif->ar;
spin_lock_bh(&ar->data_lock);
__ath12k_p2p_noa_update(arvif, noa);
spin_unlock_bh(&ar->data_lock);
}
static void ath12k_p2p_noa_update_vdev_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_p2p_noa_arg *arg = data;
if (arvif->vdev_id != arg->vdev_id)
return;
ath12k_p2p_noa_update(arvif, arg->noa);
}
void ath12k_p2p_noa_update_by_vdev_id(struct ath12k *ar, u32 vdev_id,
const struct ath12k_wmi_p2p_noa_info *noa)
{
struct ath12k_p2p_noa_arg arg = {
.vdev_id = vdev_id,
.noa = noa,
};
ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar),
IEEE80211_IFACE_ITER_NORMAL,
ath12k_p2p_noa_update_vdev_iter,
&arg);
}


@ -0,0 +1,23 @@
/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_P2P_H
#define ATH12K_P2P_H
#include "wmi.h"
struct ath12k_wmi_p2p_noa_info;
struct ath12k_p2p_noa_arg {
u32 vdev_id;
const struct ath12k_wmi_p2p_noa_info *noa;
};
void ath12k_p2p_noa_update(struct ath12k_vif *arvif,
const struct ath12k_wmi_p2p_noa_info *noa);
void ath12k_p2p_noa_update_by_vdev_id(struct ath12k *ar, u32 vdev_id,
const struct ath12k_wmi_p2p_noa_info *noa);
#endif


@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@ -39,6 +39,10 @@
#define QCN9274_DEVICE_ID 0x1109
#define WCN7850_DEVICE_ID 0x1107
#define PCIE_LOCAL_REG_QRTR_NODE_ID 0x1E03164
#define DOMAIN_NUMBER_MASK GENMASK(7, 4)
#define BUS_NUMBER_MASK GENMASK(3, 0)
static const struct pci_device_id ath12k_pci_id_table[] = {
{ PCI_VDEVICE(QCOM, QCN9274_DEVICE_ID) },
{ PCI_VDEVICE(QCOM, WCN7850_DEVICE_ID) },
@ -201,18 +205,17 @@ static u32 ath12k_pci_get_window_start(struct ath12k_base *ab,
/* If offset lies within CE register range, use 2nd window */
else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < WINDOW_RANGE_MASK)
window_start = 2 * WINDOW_START;
/* If offset lies within PCI_BAR_WINDOW0_BASE and within PCI_SOC_PCI_REG_BASE
* use 0th window
*/
else if (((offset ^ PCI_BAR_WINDOW0_BASE) < WINDOW_RANGE_MASK) &&
!((offset ^ PCI_SOC_PCI_REG_BASE) < PCI_SOC_RANGE_MASK))
window_start = 0;
else
window_start = WINDOW_START;
return window_start;
}
static inline bool ath12k_pci_is_offset_within_mhi_region(u32 offset)
{
return (offset >= PCI_MHIREGLEN_REG && offset <= PCI_MHI_REGION_END);
}
static void ath12k_pci_soc_global_reset(struct ath12k_base *ab)
{
u32 val, delay;
@ -682,12 +685,22 @@ static void ath12k_pci_init_qmi_ce_config(struct ath12k_base *ab)
{
struct ath12k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
struct pci_bus *bus = ab_pci->pdev->bus;
cfg->tgt_ce = ab->hw_params->target_ce_config;
cfg->tgt_ce_len = ab->hw_params->target_ce_count;
cfg->svc_to_ce_map = ab->hw_params->svc_to_ce_map;
cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len;
ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id;
if (test_bit(ATH12K_FW_FEATURE_MULTI_QRTR_ID, ab->fw.fw_features)) {
ab_pci->qmi_instance =
u32_encode_bits(pci_domain_nr(bus), DOMAIN_NUMBER_MASK) |
u32_encode_bits(bus->number, BUS_NUMBER_MASK);
ab->qmi.service_ins_id += ab_pci->qmi_instance;
}
}
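/* Illustrative example (not part of the driver): on a host where the device
 * sits in PCIe domain 0 on bus 1, qmi_instance becomes 0x01 and the QMI
 * service instance ID is offset by 1; a second identical card on bus 2 gets
 * 0x02, so the two firmware instances end up with distinct QRTR node IDs.
 */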
static void ath12k_pci_ce_irqs_enable(struct ath12k_base *ab)
@ -901,6 +914,26 @@ static void ath12k_pci_aspm_disable(struct ath12k_pci *ab_pci)
set_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags);
}
static void ath12k_pci_update_qrtr_node_id(struct ath12k_base *ab)
{
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
u32 reg;
/* On platforms with two or more identical MHI devices, the QMI services run
 * with identical qrtr-node-ids. Because of the identical IDs, qrtr-lookup
 * cannot register more than one QMI service with the same node ID.
 *
 * Generate a unique instance ID from the PCIe domain number and bus number,
 * and write it to the given register so that it is available to the firmware
 * when the QMI service is spawned.
 */
reg = PCIE_LOCAL_REG_QRTR_NODE_ID & WINDOW_RANGE_MASK;
ath12k_pci_write32(ab, reg, ab_pci->qmi_instance);
ath12k_dbg(ab, ATH12K_DBG_PCI, "pci reg 0x%x instance 0x%x read val 0x%x\n",
reg, ab_pci->qmi_instance, ath12k_pci_read32(ab, reg));
}
static void ath12k_pci_aspm_restore(struct ath12k_pci *ab_pci)
{
if (test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags))
@ -1138,15 +1171,17 @@ u32 ath12k_pci_read32(struct ath12k_base *ab, u32 offset)
if (window_start == WINDOW_START) {
spin_lock_bh(&ab_pci->window_lock);
ath12k_pci_select_window(ab_pci, offset);
val = ioread32(ab->mem + window_start +
(offset & WINDOW_RANGE_MASK));
if (ath12k_pci_is_offset_within_mhi_region(offset)) {
offset = offset - PCI_MHIREGLEN_REG;
val = ioread32(ab->mem +
(offset & WINDOW_RANGE_MASK));
} else {
val = ioread32(ab->mem + window_start +
(offset & WINDOW_RANGE_MASK));
}
spin_unlock_bh(&ab_pci->window_lock);
} else {
if ((!window_start) &&
(offset >= PCI_MHIREGLEN_REG &&
offset <= PCI_MHI_REGION_END))
offset = offset - PCI_MHIREGLEN_REG;
val = ioread32(ab->mem + window_start +
(offset & WINDOW_RANGE_MASK));
}
@ -1183,15 +1218,17 @@ void ath12k_pci_write32(struct ath12k_base *ab, u32 offset, u32 value)
if (window_start == WINDOW_START) {
spin_lock_bh(&ab_pci->window_lock);
ath12k_pci_select_window(ab_pci, offset);
iowrite32(value, ab->mem + window_start +
(offset & WINDOW_RANGE_MASK));
if (ath12k_pci_is_offset_within_mhi_region(offset)) {
offset = offset - PCI_MHIREGLEN_REG;
iowrite32(value, ab->mem +
(offset & WINDOW_RANGE_MASK));
} else {
iowrite32(value, ab->mem + window_start +
(offset & WINDOW_RANGE_MASK));
}
spin_unlock_bh(&ab_pci->window_lock);
} else {
if ((!window_start) &&
(offset >= PCI_MHIREGLEN_REG &&
offset <= PCI_MHI_REGION_END))
offset = offset - PCI_MHIREGLEN_REG;
iowrite32(value, ab->mem + window_start +
(offset & WINDOW_RANGE_MASK));
}
@ -1219,6 +1256,9 @@ int ath12k_pci_power_up(struct ath12k_base *ab)
ath12k_pci_msi_enable(ab_pci);
if (test_bit(ATH12K_FW_FEATURE_MULTI_QRTR_ID, ab->fw.fw_features))
ath12k_pci_update_qrtr_node_id(ab);
ret = ath12k_mhi_start(ab_pci);
if (ret) {
ath12k_err(ab, "failed to start mhi: %d\n", ret);
@ -1324,6 +1364,7 @@ static int ath12k_pci_probe(struct pci_dev *pdev,
ab_pci->msi_config = &ath12k_msi_config[0];
ab->static_window_map = true;
ab_pci->pci_ops = &ath12k_pci_ops_qcn9274;
ab->hal_rx_ops = &hal_rx_qcn9274_ops;
ath12k_pci_read_hw_version(ab, &soc_hw_version_major,
&soc_hw_version_minor);
switch (soc_hw_version_major) {
@ -1346,6 +1387,7 @@ static int ath12k_pci_probe(struct pci_dev *pdev,
ab_pci->msi_config = &ath12k_msi_config[0];
ab->static_window_map = false;
ab_pci->pci_ops = &ath12k_pci_ops_wcn7850;
ab->hal_rx_ops = &hal_rx_wcn7850_ops;
ath12k_pci_read_hw_version(ab, &soc_hw_version_major,
&soc_hw_version_minor);
switch (soc_hw_version_major) {


@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_PCI_H
#define ATH12K_PCI_H
@ -53,6 +53,9 @@
#define WLAON_QFPROM_PWR_CTRL_REG 0x01f8031c
#define QFPROM_PWR_CTRL_VDD4BLOW_MASK 0x4
#define QCN9274_QFPROM_RAW_RFA_PDET_ROW13_LSB 0x1E20338
#define OTP_BOARD_ID_MASK GENMASK(15, 0)
#define PCI_BAR_WINDOW0_BASE 0x1E00000
#define PCI_BAR_WINDOW0_END 0x1E7FFFC
#define PCI_SOC_RANGE_MASK 0x3FFF
@ -111,6 +114,7 @@ struct ath12k_pci {
u16 link_ctl;
unsigned long irq_flags;
const struct ath12k_pci_ops *pci_ops;
u32 qmi_instance;
};
static inline struct ath12k_pci *ath12k_pci_priv(struct ath12k_base *ab)


@ -2124,6 +2124,9 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
struct qmi_txn txn;
int ret;
if (!ab->slo_capable)
goto out;
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_phy_cap_resp_msg_v01_ei, &resp);
if (ret < 0)
@ -2666,37 +2669,56 @@ static int ath12k_qmi_load_bdf_qmi(struct ath12k_base *ab,
static int ath12k_qmi_m3_load(struct ath12k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
const struct firmware *fw;
const struct firmware *fw = NULL;
const void *m3_data;
char path[100];
size_t m3_len;
int ret;
if (m3_mem->vaddr || m3_mem->size)
if (m3_mem->vaddr)
/* m3 firmware buffer is already available in the DMA buffer */
return 0;
fw = ath12k_core_firmware_request(ab, ATH12K_M3_FILE);
if (IS_ERR(fw)) {
ret = PTR_ERR(fw);
ath12k_core_create_firmware_path(ab, ATH12K_M3_FILE,
path, sizeof(path));
ath12k_err(ab, "failed to load %s: %d\n", path, ret);
return ret;
if (ab->fw.m3_data && ab->fw.m3_len > 0) {
/* firmware-N.bin had a m3 firmware file so use that */
m3_data = ab->fw.m3_data;
m3_len = ab->fw.m3_len;
} else {
/* No m3 file in firmware-N.bin, so fall back to requesting the old
 * separate m3.bin file.
 */
fw = ath12k_core_firmware_request(ab, ATH12K_M3_FILE);
if (IS_ERR(fw)) {
ret = PTR_ERR(fw);
ath12k_core_create_firmware_path(ab, ATH12K_M3_FILE,
path, sizeof(path));
ath12k_err(ab, "failed to load %s: %d\n", path, ret);
return ret;
}
m3_data = fw->data;
m3_len = fw->size;
}
m3_mem->vaddr = dma_alloc_coherent(ab->dev,
fw->size, &m3_mem->paddr,
m3_len, &m3_mem->paddr,
GFP_KERNEL);
if (!m3_mem->vaddr) {
ath12k_err(ab, "failed to allocate memory for M3 with size %zu\n",
fw->size);
release_firmware(fw);
return -ENOMEM;
ret = -ENOMEM;
goto out;
}
memcpy(m3_mem->vaddr, fw->data, fw->size);
m3_mem->size = fw->size;
memcpy(m3_mem->vaddr, m3_data, m3_len);
m3_mem->size = m3_len;
ret = 0;
out:
release_firmware(fw);
return 0;
return ret;
}
static void ath12k_qmi_m3_free(struct ath12k_base *ab)


@ -15,7 +15,6 @@
#define ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE 64
#define ATH12K_QMI_CALDB_ADDRESS 0x4BA00000
#define ATH12K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 128
#define ATH12K_QMI_WLFW_NODE_ID_BASE 0x07
#define ATH12K_QMI_WLFW_SERVICE_ID_V01 0x45
#define ATH12K_QMI_WLFW_SERVICE_VERS_V01 0x01
#define ATH12K_QMI_WLFW_SERVICE_INS_ID_V01 0x02


@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_RX_DESC_H
#define ATH12K_RX_DESC_H
@ -147,6 +147,61 @@ struct rx_mpdu_start_qcn9274 {
__le32 res1;
} __packed;
#define QCN9274_MPDU_START_SELECT_MPDU_START_TAG BIT(0)
#define QCN9274_MPDU_START_SELECT_INFO0_REO_QUEUE_DESC_LO BIT(1)
#define QCN9274_MPDU_START_SELECT_INFO1_PN_31_0 BIT(2)
#define QCN9274_MPDU_START_SELECT_PN_95_32 BIT(3)
#define QCN9274_MPDU_START_SELECT_PN_127_96_INFO2 BIT(4)
#define QCN9274_MPDU_START_SELECT_PEER_MDATA_INFO3_PHY_PPDU_ID BIT(5)
#define QCN9274_MPDU_START_SELECT_AST_IDX_SW_PEER_ID_INFO4 BIT(6)
#define QCN9274_MPDU_START_SELECT_INFO5_INFO6 BIT(7)
#define QCN9274_MPDU_START_SELECT_FRAME_CTRL_DURATION_ADDR1_31_0 BIT(8)
#define QCN9274_MPDU_START_SELECT_ADDR2_47_0_ADDR1_47_32 BIT(9)
#define QCN9274_MPDU_START_SELECT_ADDR3_47_0_SEQ_CTRL BIT(10)
#define QCN9274_MPDU_START_SELECT_ADDR4_47_0_QOS_CTRL BIT(11)
#define QCN9274_MPDU_START_SELECT_HT_CTRL_INFO7 BIT(12)
#define QCN9274_MPDU_START_SELECT_ML_ADDR1_47_0_ML_ADDR2_15_0 BIT(13)
#define QCN9274_MPDU_START_SELECT_ML_ADDR2_47_16_INFO8 BIT(14)
#define QCN9274_MPDU_START_SELECT_RES_0_RES_1 BIT(15)
#define QCN9274_MPDU_START_WMASK (QCN9274_MPDU_START_SELECT_INFO1_PN_31_0 | \
QCN9274_MPDU_START_SELECT_PN_95_32 | \
QCN9274_MPDU_START_SELECT_PN_127_96_INFO2 | \
QCN9274_MPDU_START_SELECT_PEER_MDATA_INFO3_PHY_PPDU_ID | \
QCN9274_MPDU_START_SELECT_AST_IDX_SW_PEER_ID_INFO4 | \
QCN9274_MPDU_START_SELECT_INFO5_INFO6 | \
QCN9274_MPDU_START_SELECT_FRAME_CTRL_DURATION_ADDR1_31_0 | \
QCN9274_MPDU_START_SELECT_ADDR2_47_0_ADDR1_47_32 | \
QCN9274_MPDU_START_SELECT_ADDR3_47_0_SEQ_CTRL | \
QCN9274_MPDU_START_SELECT_ADDR4_47_0_QOS_CTRL)
/* The rx_mpdu_start_qcn9274_compact structure below is tied to the mask
 * value QCN9274_MPDU_START_WMASK. If the mask value changes, the structure
 * must change as well.
 */
struct rx_mpdu_start_qcn9274_compact {
__le32 info1;
__le32 pn[4];
__le32 info2;
__le32 peer_meta_data;
__le16 info3;
__le16 phy_ppdu_id;
__le16 ast_index;
__le16 sw_peer_id;
__le32 info4;
__le32 info5;
__le32 info6;
__le16 frame_ctrl;
__le16 duration;
u8 addr1[ETH_ALEN];
u8 addr2[ETH_ALEN];
u8 addr3[ETH_ALEN];
__le16 seq_ctrl;
u8 addr4[ETH_ALEN];
__le16 qos_ctrl;
} __packed;
/* rx_mpdu_start
*
* reo_destination_indication
@ -608,6 +663,8 @@ enum rx_msdu_start_reception_type {
RX_MSDU_START_RECEPTION_TYPE_UL_MU_OFDMA_MIMO,
};
#define RX_MSDU_END_64_TLV_SRC_LINK_ID GENMASK(24, 22)
#define RX_MSDU_END_INFO0_RXPCU_MPDU_FITLER GENMASK(1, 0)
#define RX_MSDU_END_INFO0_SW_FRAME_GRP_ID GENMASK(8, 2)
@ -786,6 +843,52 @@ struct rx_msdu_end_qcn9274 {
__le32 info14;
} __packed;
#define QCN9274_MSDU_END_SELECT_MSDU_END_TAG BIT(0)
#define QCN9274_MSDU_END_SELECT_INFO0_PHY_PPDUID_IP_HDR_CSUM_INFO1 BIT(1)
#define QCN9274_MSDU_END_SELECT_INFO2_CUMULATIVE_CSUM_RULE_IND_0 BIT(2)
#define QCN9274_MSDU_END_SELECT_IPV6_OP_CRC_INFO3_TYPE13 BIT(3)
#define QCN9274_MSDU_END_SELECT_RULE_IND_1_TCP_SEQ_NUM BIT(4)
#define QCN9274_MSDU_END_SELECT_TCP_ACK_NUM_INFO4_WINDOW_SIZE BIT(5)
#define QCN9274_MSDU_END_SELECT_SA_SW_PER_ID_INFO5_SA_DA_ID BIT(6)
#define QCN9274_MSDU_END_SELECT_INFO6_FSE_METADATA BIT(7)
#define QCN9274_MSDU_END_SELECT_CCE_MDATA_TCP_UDP_CSUM_INFO7_IP_LEN BIT(8)
#define QCN9274_MSDU_END_SELECT_INFO8_INFO9 BIT(9)
#define QCN9274_MSDU_END_SELECT_INFO10_INFO11 BIT(10)
#define QCN9274_MSDU_END_SELECT_VLAN_CTAG_STAG_CI_PEER_MDATA BIT(11)
#define QCN9274_MSDU_END_SELECT_INFO12_AND_FLOW_ID_TOEPLITZ BIT(12)
#define QCN9274_MSDU_END_SELECT_PPDU_START_TS_63_32_PHY_MDATA BIT(13)
#define QCN9274_MSDU_END_SELECT_PPDU_START_TS_31_0_TOEPLITZ_HASH_2_4 BIT(14)
#define QCN9274_MSDU_END_SELECT_RES0_SA_47_0 BIT(15)
#define QCN9274_MSDU_END_SELECT_INFO13_INFO14 BIT(16)
#define QCN9274_MSDU_END_WMASK (QCN9274_MSDU_END_SELECT_MSDU_END_TAG | \
QCN9274_MSDU_END_SELECT_SA_SW_PER_ID_INFO5_SA_DA_ID | \
QCN9274_MSDU_END_SELECT_INFO10_INFO11 | \
QCN9274_MSDU_END_SELECT_INFO12_AND_FLOW_ID_TOEPLITZ | \
QCN9274_MSDU_END_SELECT_PPDU_START_TS_63_32_PHY_MDATA | \
QCN9274_MSDU_END_SELECT_INFO13_INFO14)
/* The rx_msdu_end_qcn9274_compact structure below is tied to the mask value
 * QCN9274_MSDU_END_WMASK. If the mask value changes, the structure must
 * change as well.
 */
struct rx_msdu_end_qcn9274_compact {
__le64 msdu_end_tag;
__le16 sa_sw_peer_id;
__le16 info5;
__le16 sa_idx;
__le16 da_idx_or_sw_peer_id;
__le32 info10;
__le32 info11;
__le32 info12;
__le32 flow_id_toeplitz;
__le32 ppdu_start_timestamp_63_32;
__le32 phy_meta_data;
__le32 info13;
__le32 info14;
} __packed;
/* These macro definitions are only used for WCN7850 */
#define RX_MSDU_END_WCN7850_INFO2_KEY_ID GENMASK(7, 0)
@ -1450,16 +1553,18 @@ struct rx_msdu_end_wcn7850 {
*
*/
/* TODO: Move to compact TLV approach
* By default these tlv's are not aligned to 128b boundary
* Need to remove unused qwords and make them compact/aligned
*/
struct hal_rx_desc_qcn9274 {
struct rx_msdu_end_qcn9274 msdu_end;
struct rx_mpdu_start_qcn9274 mpdu_start;
u8 msdu_payload[];
} __packed;
struct hal_rx_desc_qcn9274_compact {
struct rx_msdu_end_qcn9274_compact msdu_end;
struct rx_mpdu_start_qcn9274_compact mpdu_start;
u8 msdu_payload[];
} __packed;
#define RX_BE_PADDING0_BYTES 8
#define RX_BE_PADDING1_BYTES 8
@ -1484,6 +1589,7 @@ struct hal_rx_desc_wcn7850 {
struct hal_rx_desc {
union {
struct hal_rx_desc_qcn9274 qcn9274;
struct hal_rx_desc_qcn9274_compact qcn9274_compact;
struct hal_rx_desc_wcn7850 wcn7850;
} u;
} __packed;


@ -19,6 +19,7 @@
#include "mac.h"
#include "hw.h"
#include "peer.h"
#include "p2p.h"
struct ath12k_wmi_svc_ready_parse {
bool wmi_svc_bitmap_done;
@ -162,6 +163,14 @@ static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT] = {
.min_len = sizeof(struct wmi_twt_enable_event) },
[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT] = {
.min_len = sizeof(struct wmi_twt_disable_event) },
[WMI_TAG_P2P_NOA_INFO] = {
.min_len = sizeof(struct ath12k_wmi_p2p_noa_info) },
[WMI_TAG_P2P_NOA_EVENT] = {
.min_len = sizeof(struct wmi_p2p_noa_event) },
};
static __le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
@ -179,18 +188,9 @@ void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
struct ath12k_wmi_resource_config_arg *config)
{
config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;
if (ab->num_radios == 2) {
config->num_peers = TARGET_NUM_PEERS(DBS);
config->num_tids = TARGET_NUM_TIDS(DBS);
} else if (ab->num_radios == 3) {
config->num_peers = TARGET_NUM_PEERS(DBS_SBS);
config->num_tids = TARGET_NUM_TIDS(DBS_SBS);
} else {
/* Control should not reach here */
config->num_peers = TARGET_NUM_PEERS(SINGLE);
config->num_tids = TARGET_NUM_TIDS(SINGLE);
}
config->num_peers = ab->num_radios *
ath12k_core_get_max_peers_per_radio(ab);
config->num_tids = ath12k_core_get_max_num_tids(ab);
config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
config->num_peer_keys = TARGET_NUM_PEER_KEYS;
@ -228,6 +228,9 @@ void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
config->peer_map_unmap_version = 0x32;
config->twt_ap_pdev_count = ab->num_radios;
config->twt_ap_sta_count = 1000;
if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map))
config->dp_peer_meta_data_ver = TARGET_RX_PEER_METADATA_VER_V1B;
}
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
@ -493,13 +496,13 @@ ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
mac_caps = wmi_mac_phy_caps + phy_idx;
pdev->pdev_id = le32_to_cpu(mac_caps->pdev_id);
pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);
fw_pdev = &ab->fw_pdev[ab->fw_pdev_count];
fw_pdev->supported_bands = le32_to_cpu(mac_caps->supported_bands);
fw_pdev->pdev_id = le32_to_cpu(mac_caps->pdev_id);
fw_pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
fw_pdev->phy_id = le32_to_cpu(mac_caps->phy_id);
ab->fw_pdev_count++;
@ -727,6 +730,20 @@ static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *sk
return 0;
}
static u32 ath12k_wmi_mgmt_get_freq(struct ath12k *ar,
struct ieee80211_tx_info *info)
{
struct ath12k_base *ab = ar->ab;
u32 freq = 0;
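/* Only off-channel frames sent during a remain-on-channel session on
 * single-pdev hardware need an explicit channel frequency; everything
 * else keeps the default of 0.
 */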
if (ab->hw_params->single_pdev_only &&
ar->scan.is_roc &&
(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
freq = ar->scan.roc_freq;
return freq;
}
struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_ab, u32 len)
{
struct sk_buff *skb;
@ -752,6 +769,7 @@ int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_mgmt_send_cmd *cmd;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
struct wmi_tlv *frame_tlv;
struct sk_buff *skb;
u32 buf_len;
@ -770,7 +788,7 @@ int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(vdev_id);
cmd->desc_id = cpu_to_le32(buf_id);
cmd->chanfreq = 0;
cmd->chanfreq = cpu_to_le32(ath12k_wmi_mgmt_get_freq(ar, info));
cmd->paddr_lo = cpu_to_le32(lower_32_bits(ATH12K_SKB_CB(frame)->paddr));
cmd->paddr_hi = cpu_to_le32(upper_32_bits(ATH12K_SKB_CB(frame)->paddr));
cmd->frame_len = cpu_to_le32(frame->len);
@ -826,6 +844,9 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
if (args->if_stats_id != ATH12K_INVAL_VDEV_STATS_ID)
cmd->vdev_stats_id_valid = cpu_to_le32(BIT(0));
ptr = skb->data + sizeof(*cmd);
len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);
@ -1024,6 +1045,7 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
cmd->regdomain = cpu_to_le32(arg->regdomain);
cmd->he_ops = cpu_to_le32(arg->he_ops);
cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
cmd->mbssid_flags = cpu_to_le32(arg->mbssid_flags);
if (!restart) {
if (arg->ssid) {
@ -1051,7 +1073,7 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
/* Note: This is a nested TLV containing:
* [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
* [wmi_tlv][ath12k_wmi_p2p_noa_descriptor][wmi_tlv]..
*/
ptr += sizeof(*tlv);
@ -1710,6 +1732,48 @@ int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar,
return ret;
}
int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
const u8 *p2p_ie)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_p2p_go_set_beacon_ie_cmd *cmd;
size_t p2p_ie_len, aligned_len;
struct wmi_tlv *tlv;
struct sk_buff *skb;
void *ptr;
int ret, len;
p2p_ie_len = p2p_ie[1] + 2;
aligned_len = roundup(p2p_ie_len, sizeof(u32));
len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
ptr = skb->data;
cmd = ptr;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_P2P_GO_SET_BEACON_IE,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(vdev_id);
cmd->ie_buf_len = cpu_to_le32(p2p_ie_len);
ptr += sizeof(*cmd);
tlv = ptr;
tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE,
aligned_len);
memcpy(tlv->value, p2p_ie, p2p_ie_len);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_P2P_GO_SET_BEACON_IE);
if (ret) {
ath12k_warn(ar->ab, "failed to send WMI_P2P_GO_SET_BEACON_IE\n");
dev_kfree_skb(skb);
}
return ret;
}
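A minimal usage sketch of the new helper, assuming the caller already holds the beacon's IE buffer; cfg80211_find_vendor_ie() is the standard cfg80211 lookup, while the wrapper name ath12k_p2p_example_setup_go_ie() is purely illustrative and not part of this patch:

/* Illustrative only: locate the WFA P2P vendor IE in an IE buffer and hand it
 * to ath12k_wmi_p2p_go_bcn_ie(). The real caller lives in the mac/beacon path.
 */
static int ath12k_p2p_example_setup_go_ie(struct ath12k *ar, u32 vdev_id,
					  const u8 *ies, size_t ies_len)
{
	const u8 *p2p_ie;

	/* p2p_ie[0] is the element ID, p2p_ie[1] the length byte used above */
	p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
					 ies, ies_len);
	if (!p2p_ie)
		return -ENOENT;

	return ath12k_wmi_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
}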
int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
struct ieee80211_mutable_offsets *offs,
struct sk_buff *bcn)
@ -2130,7 +2194,7 @@ void ath12k_wmi_start_scan_init(struct ath12k *ar,
WMI_SCAN_EVENT_BSS_CHANNEL |
WMI_SCAN_EVENT_FOREIGN_CHAN |
WMI_SCAN_EVENT_DEQUEUED;
arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
arg->scan_f_chan_stat_evnt = 1;
arg->num_bssid = 1;
/* fill bssid_list[0] with 0xff, otherwise bssid and RA will be
@ -3265,6 +3329,9 @@ ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cf
wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count);
wmi_cfg->flags2 = le32_encode_bits(tg_cfg->dp_peer_meta_data_ver,
WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION);
wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported <<
WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
}
@ -4214,7 +4281,7 @@ ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
for (i = 0; i < ab->fw_pdev_count; i++) {
struct ath12k_fw_pdev *fw_pdev = &ab->fw_pdev[i];
if (fw_pdev->pdev_id == le32_to_cpu(caps->pdev_id) &&
if (fw_pdev->pdev_id == ath12k_wmi_caps_ext_get_pdev_id(caps) &&
fw_pdev->phy_id == le32_to_cpu(caps->phy_id)) {
bands = fw_pdev->supported_bands;
break;
@ -4271,7 +4338,8 @@ static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag,
return 0;
} else {
for (i = 0; i < ab->num_radios; i++) {
if (ab->pdevs[i].pdev_id == le32_to_cpu(caps->pdev_id))
if (ab->pdevs[i].pdev_id ==
ath12k_wmi_caps_ext_get_pdev_id(caps))
break;
}
@ -5006,6 +5074,10 @@ static void ath12k_wmi_event_scan_started(struct ath12k *ar)
break;
case ATH12K_SCAN_STARTING:
ar->scan.state = ATH12K_SCAN_RUNNING;
if (ar->scan.is_roc)
ieee80211_ready_on_channel(ath12k_ar_to_hw(ar));
complete(&ar->scan.started);
break;
}
@ -5090,6 +5162,10 @@ static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq)
case ATH12K_SCAN_RUNNING:
case ATH12K_SCAN_ABORTING:
ar->scan_channel = ieee80211_get_channel(hw->wiphy, freq);
if (ar->scan.is_roc && ar->scan.roc_freq == freq)
complete(&ar->scan.on_channel);
break;
}
}
@ -6630,6 +6706,53 @@ static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab,
kfree(tb);
}
static int ath12k_wmi_p2p_noa_event(struct ath12k_base *ab,
struct sk_buff *skb)
{
const void **tb;
const struct wmi_p2p_noa_event *ev;
const struct ath12k_wmi_p2p_noa_info *noa;
struct ath12k *ar;
int ret, vdev_id;
tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse P2P NoA TLV: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_P2P_NOA_EVENT];
noa = tb[WMI_TAG_P2P_NOA_INFO];
if (!ev || !noa) {
ret = -EPROTO;
goto out;
}
vdev_id = __le32_to_cpu(ev->vdev_id);
ath12k_dbg(ab, ATH12K_DBG_WMI,
"wmi tlv p2p noa vdev_id %i descriptors %u\n",
vdev_id, le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM));
ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
if (!ar) {
ath12k_warn(ab, "invalid vdev id %d in P2P NoA event\n",
vdev_id);
ret = -EINVAL;
goto out;
}
ath12k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
ret = 0;
out:
kfree(tb);
return ret;
}
static void ath12k_rfkill_state_change_event(struct ath12k_base *ab,
struct sk_buff *skb)
{
@ -6670,6 +6793,64 @@ ath12k_wmi_diag_event(struct ath12k_base *ab, struct sk_buff *skb)
trace_ath12k_wmi_diag(ab, skb->data, skb->len);
}
static void ath12k_wmi_twt_enable_event(struct ath12k_base *ab,
struct sk_buff *skb)
{
const void **tb;
const struct wmi_twt_enable_event *ev;
int ret;
tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse wmi twt enable status event tlv: %d\n",
ret);
return;
}
ev = tb[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT];
if (!ev) {
ath12k_warn(ab, "failed to fetch twt enable wmi event\n");
goto exit;
}
ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt enable event pdev id %u status %u\n",
le32_to_cpu(ev->pdev_id),
le32_to_cpu(ev->status));
exit:
kfree(tb);
}
static void ath12k_wmi_twt_disable_event(struct ath12k_base *ab,
struct sk_buff *skb)
{
const void **tb;
const struct wmi_twt_disable_event *ev;
int ret;
tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse wmi twt disable status event tlv: %d\n",
ret);
return;
}
ev = tb[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT];
if (!ev) {
ath12k_warn(ab, "failed to fetch twt disable wmi event\n");
goto exit;
}
ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt disable event pdev id %u status %u\n",
le32_to_cpu(ev->pdev_id),
le32_to_cpu(ev->status));
exit:
kfree(tb);
}
static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@ -6765,11 +6946,18 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
case WMI_RFKILL_STATE_CHANGE_EVENTID:
ath12k_rfkill_state_change_event(ab, skb);
break;
case WMI_TWT_ENABLE_EVENTID:
ath12k_wmi_twt_enable_event(ab, skb);
break;
case WMI_TWT_DISABLE_EVENTID:
ath12k_wmi_twt_disable_event(ab, skb);
break;
case WMI_P2P_NOA_EVENTID:
ath12k_wmi_p2p_noa_event(ab, skb);
break;
/* add Unsupported events here */
case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
case WMI_TWT_ENABLE_EVENTID:
case WMI_TWT_DISABLE_EVENTID:
case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
ath12k_dbg(ab, ATH12K_DBG_WMI,
"ignoring unsupported event 0x%x\n", id);

View file

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_WMI_H
@ -168,10 +168,6 @@ struct wmi_tlv {
#define WLAN_SCAN_MAX_HINT_BSSID 10
#define MAX_RNR_BSS 5
#define WLAN_SCAN_PARAMS_MAX_SSID 16
#define WLAN_SCAN_PARAMS_MAX_BSSID 4
#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
#define WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG 1
#define WMI_BA_MODE_BUFFER_SIZE_256 3
@ -2163,6 +2159,10 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_11BE = 289,
WMI_TLV_SERVICE_WMSK_COMPACTION_RX_TLVS = 361,
WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT = 365,
WMI_MAX_EXT2_SERVICE,
};
@ -2350,6 +2350,7 @@ struct ath12k_wmi_resource_config_arg {
u32 twt_ap_pdev_count;
u32 twt_ap_sta_count;
bool is_reg_cc_ext_event_supported;
u8 dp_peer_meta_data_ver;
};
struct ath12k_wmi_init_cmd_arg {
@ -2402,6 +2403,7 @@ struct wmi_init_cmd {
} __packed;
#define WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT 4
#define WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION GENMASK(5, 4)
struct ath12k_wmi_resource_config_params {
__le32 tlv_header;
@ -2542,9 +2544,17 @@ struct ath12k_wmi_hw_mode_cap_params {
#define WMI_MAX_HECAP_PHY_SIZE (3)
/* pdev_id is present in lower 16 bits of pdev_and_hw_link_ids in
* ath12k_wmi_mac_phy_caps_params & ath12k_wmi_caps_ext_params.
*
* hw_link_id is present in higher 16 bits of pdev_and_hw_link_ids.
*/
#define WMI_CAPS_PARAMS_PDEV_ID GENMASK(15, 0)
#define WMI_CAPS_PARAMS_HW_LINK_ID GENMASK(31, 16)
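/* e.g. a raw pdev_and_hw_link_ids of 0x00020001 carries pdev_id 1 and hw_link_id 2 */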
struct ath12k_wmi_mac_phy_caps_params {
__le32 hw_mode_id;
__le32 pdev_id;
__le32 pdev_and_hw_link_ids;
__le32 phy_id;
__le32 supported_flags;
__le32 supported_bands;
@ -2636,13 +2646,7 @@ struct wmi_service_ready_ext2_event {
struct ath12k_wmi_caps_ext_params {
__le32 hw_mode_id;
union {
struct {
__le16 pdev_id;
__le16 hw_link_id;
} __packed ath12k_wmi_pdev_to_link_map;
__le32 pdev_id;
};
__le32 pdev_and_hw_link_ids;
__le32 phy_id;
__le32 wireless_modes_ext;
__le32 eht_cap_mac_info_2ghz[WMI_MAX_EHTCAP_MAC_SIZE];
@ -2716,6 +2720,9 @@ struct wmi_vdev_create_cmd {
struct ath12k_wmi_mac_addr_params vdev_macaddr;
__le32 num_cfg_txrx_streams;
__le32 pdev_id;
__le32 mbssid_flags;
__le32 mbssid_tx_vdev_id;
__le32 vdev_stats_id_valid;
__le32 vdev_stats_id;
} __packed;
@ -2764,6 +2771,10 @@ struct ath12k_wmi_ssid_params {
#define ATH12K_VDEV_SETUP_TIMEOUT_HZ (5 * HZ)
enum wmi_vdev_mbssid_flags {
WMI_VDEV_MBSSID_FLAGS_NON_MBSSID_AP = BIT(0),
};
struct wmi_vdev_start_request_cmd {
__le32 tlv_header;
__le32 vdev_id;
@ -2782,7 +2793,7 @@ struct wmi_vdev_start_request_cmd {
__le32 cac_duration_ms;
__le32 regdomain;
__le32 min_data_rate;
__le32 mbssid_flags;
__le32 mbssid_flags; /* uses enum wmi_vdev_mbssid_flags */
__le32 mbssid_tx_vdev_id;
__le32 eht_ops;
__le32 punct_bitmap;
@ -3146,7 +3157,7 @@ struct ath12k_wmi_element_info_arg {
#define WLAN_SCAN_PARAMS_MAX_SSID 16
#define WLAN_SCAN_PARAMS_MAX_BSSID 4
#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
#define WLAN_SCAN_PARAMS_MAX_IE_LEN 512
/* Values lower than this may be refused by some firmware revisions with a scan
* completion with a timedout reason.
@ -3270,24 +3281,19 @@ struct ath12k_wmi_scan_req_arg {
u32 vdev_id;
u32 pdev_id;
enum wmi_scan_priority scan_priority;
union {
struct {
u32 scan_ev_started:1,
scan_ev_completed:1,
scan_ev_bss_chan:1,
scan_ev_foreign_chan:1,
scan_ev_dequeued:1,
scan_ev_preempted:1,
scan_ev_start_failed:1,
scan_ev_restarted:1,
scan_ev_foreign_chn_exit:1,
scan_ev_invalid:1,
scan_ev_gpio_timeout:1,
scan_ev_suspended:1,
scan_ev_resumed:1;
};
u32 scan_events;
};
u32 scan_ev_started:1,
scan_ev_completed:1,
scan_ev_bss_chan:1,
scan_ev_foreign_chan:1,
scan_ev_dequeued:1,
scan_ev_preempted:1,
scan_ev_start_failed:1,
scan_ev_restarted:1,
scan_ev_foreign_chn_exit:1,
scan_ev_invalid:1,
scan_ev_gpio_timeout:1,
scan_ev_suspended:1,
scan_ev_resumed:1;
u32 dwell_time_active;
u32 dwell_time_active_2g;
u32 dwell_time_passive;
@ -3300,36 +3306,31 @@ struct ath12k_wmi_scan_req_arg {
u32 idle_time;
u32 max_scan_time;
u32 probe_delay;
union {
struct {
u32 scan_f_passive:1,
scan_f_bcast_probe:1,
scan_f_cck_rates:1,
scan_f_ofdm_rates:1,
scan_f_chan_stat_evnt:1,
scan_f_filter_prb_req:1,
scan_f_bypass_dfs_chn:1,
scan_f_continue_on_err:1,
scan_f_offchan_mgmt_tx:1,
scan_f_offchan_data_tx:1,
scan_f_promisc_mode:1,
scan_f_capture_phy_err:1,
scan_f_strict_passive_pch:1,
scan_f_half_rate:1,
scan_f_quarter_rate:1,
scan_f_force_active_dfs_chn:1,
scan_f_add_tpc_ie_in_probe:1,
scan_f_add_ds_ie_in_probe:1,
scan_f_add_spoofed_mac_in_probe:1,
scan_f_add_rand_seq_in_probe:1,
scan_f_en_ie_whitelist_in_probe:1,
scan_f_forced:1,
scan_f_2ghz:1,
scan_f_5ghz:1,
scan_f_80mhz:1;
};
u32 scan_flags;
};
u32 scan_f_passive:1,
scan_f_bcast_probe:1,
scan_f_cck_rates:1,
scan_f_ofdm_rates:1,
scan_f_chan_stat_evnt:1,
scan_f_filter_prb_req:1,
scan_f_bypass_dfs_chn:1,
scan_f_continue_on_err:1,
scan_f_offchan_mgmt_tx:1,
scan_f_offchan_data_tx:1,
scan_f_promisc_mode:1,
scan_f_capture_phy_err:1,
scan_f_strict_passive_pch:1,
scan_f_half_rate:1,
scan_f_quarter_rate:1,
scan_f_force_active_dfs_chn:1,
scan_f_add_tpc_ie_in_probe:1,
scan_f_add_ds_ie_in_probe:1,
scan_f_add_spoofed_mac_in_probe:1,
scan_f_add_rand_seq_in_probe:1,
scan_f_en_ie_whitelist_in_probe:1,
scan_f_forced:1,
scan_f_2ghz:1,
scan_f_5ghz:1,
scan_f_80mhz:1;
enum scan_dwelltime_adaptive_mode adaptive_dwell_time_mode;
u32 burst_duration;
u32 num_chan;
@ -3489,6 +3490,37 @@ struct wmi_get_pdev_temperature_cmd {
__le32 pdev_id;
} __packed;
#define WMI_P2P_MAX_NOA_DESCRIPTORS 4
struct wmi_p2p_noa_event {
__le32 vdev_id;
} __packed;
struct ath12k_wmi_p2p_noa_descriptor {
__le32 type_count; /* 255: continuous schedule, 0: reserved */
__le32 duration; /* Absent period duration in microseconds */
__le32 interval; /* Absent period interval in microseconds */
__le32 start_time; /* 32-bit TSF time when it starts */
} __packed;
#define WMI_P2P_NOA_INFO_CHANGED_FLAG BIT(0)
#define WMI_P2P_NOA_INFO_INDEX GENMASK(15, 8)
#define WMI_P2P_NOA_INFO_OPP_PS BIT(16)
#define WMI_P2P_NOA_INFO_CTWIN_TU GENMASK(23, 17)
#define WMI_P2P_NOA_INFO_DESC_NUM GENMASK(31, 24)
struct ath12k_wmi_p2p_noa_info {
/* Bit 0 - Flag to indicate an update in NOA schedule
* Bits 7-1 - Reserved
* Bits 15-8 - Index (identifies the instance of NOA sub element)
* Bit 16 - Opp PS state of the AP
* Bits 23-17 - Ctwindow in TUs
* Bits 31-24 - Number of NOA descriptors
*/
__le32 noa_attr;
struct ath12k_wmi_p2p_noa_descriptor descriptors[WMI_P2P_MAX_NOA_DESCRIPTORS];
} __packed;
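As a reference for the bit layout documented above, a minimal sketch that unpacks noa_attr with le32_get_bits(); the helper name ath12k_p2p_noa_attr_dump() is hypothetical and not part of this patch:

/* Illustrative only: decode the packed NoA attribute using the masks above. */
static void ath12k_p2p_noa_attr_dump(struct ath12k_base *ab,
				     const struct ath12k_wmi_p2p_noa_info *noa)
{
	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "noa changed %u index %u opp_ps %u ctwin %u TU descriptors %u\n",
		   le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_CHANGED_FLAG),
		   le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_INDEX),
		   le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_OPP_PS),
		   le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_CTWIN_TU),
		   le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM));
}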
#define WMI_BEACON_TX_BUFFER_SIZE 512
struct wmi_bcn_tmpl_cmd {
@ -3503,6 +3535,12 @@ struct wmi_bcn_tmpl_cmd {
__le32 esp_ie_offset;
} __packed;
struct wmi_p2p_go_set_beacon_ie_cmd {
__le32 tlv_header;
__le32 vdev_id;
__le32 ie_buf_len;
} __packed;
struct wmi_vdev_install_key_cmd {
__le32 tlv_header;
__le32 vdev_id;
@ -4797,6 +4835,16 @@ struct wmi_rfkill_state_change_event {
__le32 radio_state;
} __packed;
struct wmi_twt_enable_event {
__le32 pdev_id;
__le32 status;
} __packed;
struct wmi_twt_disable_event {
__le32 pdev_id;
__le32 status;
} __packed;
void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
struct ath12k_wmi_resource_config_arg *config);
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
@ -4806,6 +4854,8 @@ int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_sc, u32 len);
int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
struct sk_buff *frame);
int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
const u8 *p2p_ie);
int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
struct ieee80211_mutable_offsets *offs,
struct sk_buff *bcn);
@ -4917,4 +4967,30 @@ int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id,
int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
enum wmi_host_hw_mode_config_type mode);
static inline u32
ath12k_wmi_caps_ext_get_pdev_id(const struct ath12k_wmi_caps_ext_params *param)
{
return le32_get_bits(param->pdev_and_hw_link_ids, WMI_CAPS_PARAMS_PDEV_ID);
}
static inline u32
ath12k_wmi_caps_ext_get_hw_link_id(const struct ath12k_wmi_caps_ext_params *param)
{
return le32_get_bits(param->pdev_and_hw_link_ids, WMI_CAPS_PARAMS_HW_LINK_ID);
}
static inline u32
ath12k_wmi_mac_phy_get_pdev_id(const struct ath12k_wmi_mac_phy_caps_params *param)
{
return le32_get_bits(param->pdev_and_hw_link_ids,
WMI_CAPS_PARAMS_PDEV_ID);
}
static inline u32
ath12k_wmi_mac_phy_get_hw_link_id(const struct ath12k_wmi_mac_phy_caps_params *param)
{
return le32_get_bits(param->pdev_and_hw_link_ids,
WMI_CAPS_PARAMS_HW_LINK_ID);
}
#endif

View file

@ -306,7 +306,6 @@ struct ath9k_htc_tx {
DECLARE_BITMAP(tx_slot, MAX_TX_BUF_NUM);
struct timer_list cleanup_timer;
spinlock_t tx_lock;
bool initialized;
};
struct ath9k_htc_tx_ctl {
@ -515,6 +514,7 @@ struct ath9k_htc_priv {
unsigned long ps_usecount;
bool ps_enabled;
bool ps_idle;
bool initialized;
#ifdef CONFIG_MAC80211_LEDS
enum led_brightness brightness;

View file

@ -966,6 +966,10 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
htc_handle->drv_priv = priv;
/* Allow ath9k_wmi_event_tasklet() to operate. */
smp_wmb();
priv->initialized = true;
return 0;
err_init:

View file

@ -815,10 +815,6 @@ int ath9k_tx_init(struct ath9k_htc_priv *priv)
skb_queue_head_init(&priv->tx.data_vo_queue);
skb_queue_head_init(&priv->tx.tx_failed);
/* Allow ath9k_wmi_event_tasklet(WMI_TXSTATUS_EVENTID) to operate. */
smp_wmb();
priv->tx.initialized = true;
return 0;
}

View file

@ -155,6 +155,12 @@ void ath9k_wmi_event_tasklet(struct tasklet_struct *t)
}
spin_unlock_irqrestore(&wmi->wmi_lock, flags);
/* Check if ath9k_htc_probe_device() completed. */
if (!data_race(priv->initialized)) {
kfree_skb(skb);
continue;
}
hdr = (struct wmi_cmd_hdr *) skb->data;
cmd_id = be16_to_cpu(hdr->command_id);
wmi_event = skb_pull(skb, sizeof(struct wmi_cmd_hdr));
@ -169,10 +175,6 @@ void ath9k_wmi_event_tasklet(struct tasklet_struct *t)
&wmi->drv_priv->fatal_work);
break;
case WMI_TXSTATUS_EVENTID:
/* Check if ath9k_tx_init() completed. */
if (!data_race(priv->tx.initialized))
break;
spin_lock_bh(&priv->tx.tx_lock);
if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) {
spin_unlock_bh(&priv->tx.tx_lock);

View file

@ -369,12 +369,11 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
struct list_head bf_head;
struct ath_tx_status ts;
struct ath_frame_info *fi;
int ret;
memset(&ts, 0, sizeof(ts));
INIT_LIST_HEAD(&bf_head);
while ((ret = ath_tid_dequeue(tid, &skb)) == 0) {
while (ath_tid_dequeue(tid, &skb) == 0) {
fi = get_frame_info(skb);
bf = fi->bf;

View file

@ -189,7 +189,7 @@ static void carl9170_tx_accounting_free(struct ar9170 *ar, struct sk_buff *skb)
static int carl9170_alloc_dev_space(struct ar9170 *ar, struct sk_buff *skb)
{
struct _carl9170_tx_superframe *super = (void *) skb->data;
struct _carl9170_tx_superframe *super;
unsigned int chunks;
int cookie = -1;