Merge tag 'iwlwifi-next-for-kalle-2020-04-24-2' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

First set of iwlwifi patches intended for v5.8

* TX queue debugfs improvements;
* Support for a few new FW API versions;
* Remove deprecated scan FW API version;
* New hw configs and other related changes;
* Support for NPK buffers in debugging;
* More debugging features;
* Some other small fixes and clean-ups;

# gpg: Signature made Fri 24 Apr 2020 04:39:43 PM EEST using RSA key ID 1A3CC5FA
# gpg: Good signature from "Luciano Roth Coelho (Luca) <luca@coelho.fi>"
# gpg:                 aka "Luciano Roth Coelho (Intel) <luciano.coelho@intel.com>"
Kalle Valo, 2020-05-04 11:45:47 +03:00, commit c1070c1c6c
37 changed files with 805 additions and 360 deletions


@ -13,7 +13,8 @@ iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
iwlwifi-objs += iwl-dbg-tlv.o
iwlwifi-objs += iwl-trans.o
iwlwifi-objs += fw/notif-wait.o
iwlwifi-objs += fw/img.o fw/notif-wait.o
iwlwifi-objs += fw/dbg.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o
iwlwifi-$(CONFIG_ACPI) += fw/acpi.o


@ -90,7 +90,8 @@
#define IWL_22000_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0-"
#define IWL_22000_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0-"
#define IWL_22000_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0-"
#define IWL_22000_SOSNJ_A_GF4_A_FW_PRE "iwlwifi-SoSnj-a0-gf4-a0-"
#define IWL_SNJ_A_GF4_A_FW_PRE "iwlwifi-SoSnj-a0-gf4-a0-"
#define IWL_SNJ_A_GF_A_FW_PRE "iwlwifi-SoSnj-a0-gf-a0-"
#define IWL_22000_HR_MODULE_FIRMWARE(api) \
IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
@ -120,6 +121,10 @@
IWL_22000_SO_A_GF_A_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_TY_A_GF_A_MODULE_FIRMWARE(api) \
IWL_22000_TY_A_GF_A_FW_PRE __stringify(api) ".ucode"
#define IWL_SNJ_A_GF4_A_MODULE_FIRMWARE(api) \
IWL_SNJ_A_GF4_A_FW_PRE __stringify(api) ".ucode"
#define IWL_SNJ_A_GF_A_MODULE_FIRMWARE(api) \
IWL_SNJ_A_GF_A_FW_PRE __stringify(api) ".ucode"
static const struct iwl_base_params iwl_22000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@ -229,6 +234,15 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
}, \
}
const struct iwl_cfg_trans_params iwl_qnj_trans_cfg = {
.mq_rx_supported = true,
.use_tfh = true,
.rf_id = true,
.gen2 = true,
.device_family = IWL_DEVICE_FAMILY_22000,
.base_params = &iwl_22000_base_params,
};
const struct iwl_cfg_trans_params iwl_qu_trans_cfg = {
.mq_rx_supported = true,
.use_tfh = true,
@ -238,6 +252,19 @@ const struct iwl_cfg_trans_params iwl_qu_trans_cfg = {
.base_params = &iwl_22000_base_params,
.integrated = true,
.xtal_latency = 5000,
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_200US,
};
const struct iwl_cfg_trans_params iwl_qu_medium_latency_trans_cfg = {
.mq_rx_supported = true,
.use_tfh = true,
.rf_id = true,
.gen2 = true,
.device_family = IWL_DEVICE_FAMILY_22000,
.base_params = &iwl_22000_base_params,
.integrated = true,
.xtal_latency = 1820,
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_1820US,
};
const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg = {
@ -250,15 +277,7 @@ const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg = {
.integrated = true,
.xtal_latency = 12000,
.low_latency_xtal = true,
};
const struct iwl_cfg_trans_params iwl_qnj_trans_cfg = {
.mq_rx_supported = true,
.use_tfh = true,
.rf_id = true,
.gen2 = true,
.device_family = IWL_DEVICE_FAMILY_22000,
.base_params = &iwl_22000_base_params,
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
/*
@ -522,22 +541,32 @@ const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0 = {
};
const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0 = {
.name = "Intel(R) Wi-Fi 7 AX210 160MHz",
.name = "Intel(R) Wi-Fi 6 AX210 160MHz",
.fw_name_pre = IWL_22000_SO_A_HR_B_FW_PRE,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = {
.name = "Intel(R) Wi-Fi 7 AX211 160MHz",
.name = "Intel(R) Wi-Fi 6 AX211 160MHz",
.fw_name_pre = IWL_22000_SO_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long = {
.name = "Intel(R) Wi-Fi 6 AX211 160MHz",
.fw_name_pre = IWL_22000_SO_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
.trans.xtal_latency = 12000,
.trans.low_latency_xtal = true,
};
const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = {
.name = "Intel(R) Wi-Fi 7 AX210 160MHz",
.name = "Intel(R) Wi-Fi 6 AX210 160MHz",
.fw_name_pre = IWL_22000_TY_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
@ -545,16 +574,34 @@ const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = {
};
const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0 = {
.name = "Intel(R) Wi-Fi 7 AX411 160MHz",
.name = "Intel(R) Wi-Fi 6 AX411 160MHz",
.fw_name_pre = IWL_22000_SO_A_GF4_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long = {
.name = "Intel(R) Wi-Fi 6 AX411 160MHz",
.fw_name_pre = IWL_22000_SO_A_GF4_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
.trans.xtal_latency = 12000,
.trans.low_latency_xtal = true,
};
const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0 = {
.name = "Intel(R) Wi-Fi 7 AX411 160MHz",
.fw_name_pre = IWL_22000_SOSNJ_A_GF4_A_FW_PRE,
.name = "Intel(R) Wi-Fi 6 AX411 160MHz",
.fw_name_pre = IWL_SNJ_A_GF4_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwlax211_cfg_snj_gf_a0 = {
.name = "Intel(R) Wi-Fi 6 AX211 160MHz",
.fw_name_pre = IWL_SNJ_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
@ -573,3 +620,5 @@ MODULE_FIRMWARE(IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_SO_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_SO_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_TY_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SNJ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SNJ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));


@ -151,6 +151,82 @@ union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_wifi_pkg);
int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
__le32 *black_list_array,
int *black_list_size)
{
union acpi_object *wifi_pkg, *data;
int ret, tbl_rev, i;
bool enabled;
data = iwl_acpi_get_object(fwrt->dev, ACPI_WTAS_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_WTAS_WIFI_DATA_SIZE,
&tbl_rev);
if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
if (wifi_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
tbl_rev != 0) {
ret = -EINVAL;
goto out_free;
}
enabled = !!wifi_pkg->package.elements[0].integer.value;
if (!enabled) {
*black_list_size = -1;
IWL_DEBUG_RADIO(fwrt, "TAS not enabled\n");
ret = 0;
goto out_free;
}
if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
wifi_pkg->package.elements[1].integer.value >
APCI_WTAS_BLACK_LIST_MAX) {
IWL_DEBUG_RADIO(fwrt, "TAS invalid array size %llu\n",
wifi_pkg->package.elements[1].integer.value);
ret = -EINVAL;
goto out_free;
}
*black_list_size = wifi_pkg->package.elements[1].integer.value;
IWL_DEBUG_RADIO(fwrt, "TAS array size %d\n", *black_list_size);
if (*black_list_size > APCI_WTAS_BLACK_LIST_MAX) {
IWL_DEBUG_RADIO(fwrt, "TAS invalid array size value %u\n",
*black_list_size);
ret = -EINVAL;
goto out_free;
}
for (i = 0; i < *black_list_size; i++) {
u32 country;
if (wifi_pkg->package.elements[2 + i].type !=
ACPI_TYPE_INTEGER) {
IWL_DEBUG_RADIO(fwrt,
"TAS invalid array elem %d\n", 2 + i);
ret = -EINVAL;
goto out_free;
}
country = wifi_pkg->package.elements[2 + i].integer.value;
black_list_array[i] = cpu_to_le32(country);
IWL_DEBUG_RADIO(fwrt, "TAS black list country %d\n", country);
}
ret = 0;
out_free:
kfree(data);
return ret;
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_tas);
int iwl_acpi_get_mcc(struct device *dev, char *mcc)
{
union acpi_object *wifi_pkg, *data;
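The new iwl_acpi_get_tas() reads the ACPI WTAS package as laid out in the header hunk further down (one type word, one enabled word, one size word, then up to APCI_WTAS_BLACK_LIST_MAX entries): the code checks element 0 as the integer enabled flag, element 1 as the black-list size, and elements 2 onward as the country values. Below is a minimal userspace sketch of the same validation flow; the types, the parse_wtas() name and the sample values are stand-ins for illustration, not the driver's ACPI objects.

#include <stdint.h>
#include <stdio.h>

#define WTAS_BLACK_LIST_MAX 16                        /* mirrors APCI_WTAS_BLACK_LIST_MAX */
#define WTAS_WIFI_DATA_SIZE (3 + WTAS_BLACK_LIST_MAX)

enum elem_type { ELEM_INT, ELEM_OTHER };              /* stand-in for ACPI_TYPE_INTEGER */

struct elem {                                         /* stand-in for union acpi_object */
	enum elem_type type;
	uint64_t value;
};

/* Same checks as the hunk above: 0 on success, -1 on a malformed package;
 * *list_size becomes -1 when the BIOS table says TAS is disabled. */
static int parse_wtas(const struct elem *pkg, uint32_t *list, int *list_size)
{
	int i;

	if (pkg[0].type != ELEM_INT)
		return -1;
	if (!pkg[0].value) {                          /* TAS not enabled */
		*list_size = -1;
		return 0;
	}
	if (pkg[1].type != ELEM_INT || pkg[1].value > WTAS_BLACK_LIST_MAX)
		return -1;
	*list_size = (int)pkg[1].value;
	for (i = 0; i < *list_size; i++) {
		if (pkg[2 + i].type != ELEM_INT)
			return -1;
		list[i] = (uint32_t)pkg[2 + i].value; /* driver stores this as __le32 */
	}
	return 0;
}

int main(void)
{
	struct elem pkg[WTAS_WIFI_DATA_SIZE] = {
		{ ELEM_INT, 1 },                      /* enabled */
		{ ELEM_INT, 2 },                      /* two black-listed entries */
		{ ELEM_INT, 0x3155 },                 /* sample values, not real table data */
		{ ELEM_INT, 0x3043 },
	};
	uint32_t list[WTAS_BLACK_LIST_MAX];
	int n = 0;

	if (!parse_wtas(pkg, list, &n))
		printf("TAS black list entries: %d\n", n);
	return 0;
}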


@ -64,6 +64,7 @@
#include "fw/api/commands.h"
#include "fw/api/power.h"
#include "fw/api/phy.h"
#include "fw/api/nvm-reg.h"
#include "fw/img.h"
#include "iwl-trans.h"
@ -75,6 +76,7 @@
#define ACPI_SPLC_METHOD "SPLC"
#define ACPI_ECKV_METHOD "ECKV"
#define ACPI_PPAG_METHOD "PPAG"
#define ACPI_WTAS_METHOD "WTAS"
#define ACPI_WIFI_DOMAIN (0x07)
@ -96,6 +98,12 @@
#define ACPI_SPLC_WIFI_DATA_SIZE 2
#define ACPI_ECKV_WIFI_DATA_SIZE 2
/*
* 1 type, 1 enabled, 1 black list size, 16 black list array
*/
#define APCI_WTAS_BLACK_LIST_MAX 16
#define ACPI_WTAS_WIFI_DATA_SIZE (3 + APCI_WTAS_BLACK_LIST_MAX)
#define ACPI_WGDS_NUM_BANDS 2
#define ACPI_WGDS_TABLE_SIZE 3
@ -174,6 +182,9 @@ int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt,
int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
struct iwl_per_chain_offset_group *table);
int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, __le32 *black_list_array,
int *black_list_size);
#else /* CONFIG_ACPI */
static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method)
@ -250,5 +261,11 @@ static inline int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
return -ENOENT;
}
static inline int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
__le32 *black_list_array,
int *black_list_size)
{
return -ENOENT;
}
#endif /* CONFIG_ACPI */
#endif /* __iwl_fw_acpi__ */


@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright (C) 2018 Intel Corporation
* Copyright (C) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright (C) 2018 Intel Corporation
* Copyright (C) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -119,16 +119,49 @@ enum iwl_calib_cfg {
IWL_CALIB_CFG_AGC_IDX = BIT(18),
};
/**
* struct iwl_phy_specific_cfg - specific PHY filter configuration
*
* Sent as part of the phy configuration command (v3) to configure specific FW
* defined PHY filters that can be applied to each antenna.
*
* @filter_cfg_chain_a: filter config id for LMAC1 chain A
* @filter_cfg_chain_b: filter config id for LMAC1 chain B
* @filter_cfg_chain_c: filter config id for LMAC2 chain A
* @filter_cfg_chain_d: filter config id for LMAC2 chain B
* values: 0 - no filter; 0xffffffff - reserved; otherwise - filter id
*/
struct iwl_phy_specific_cfg {
__le32 filter_cfg_chain_a;
__le32 filter_cfg_chain_b;
__le32 filter_cfg_chain_c;
__le32 filter_cfg_chain_d;
} __packed; /* PHY_SPECIFIC_CONFIGURATION_API_VER_1*/
/**
* struct iwl_phy_cfg_cmd - Phy configuration command
*
* @phy_cfg: PHY configuration value, uses &enum iwl_fw_phy_cfg
* @calib_control: calibration control data
*/
struct iwl_phy_cfg_cmd {
struct iwl_phy_cfg_cmd_v1 {
__le32 phy_cfg;
struct iwl_calib_ctrl calib_control;
} __packed;
/**
* struct iwl_phy_cfg_cmd_v3 - Phy configuration command (v3)
*
* @phy_cfg: PHY configuration value, uses &enum iwl_fw_phy_cfg
* @calib_control: calibration control data
* @phy_specific_cfg: configure predefined PHY filters
*/
struct iwl_phy_cfg_cmd_v3 {
__le32 phy_cfg;
struct iwl_calib_ctrl calib_control;
struct iwl_phy_specific_cfg phy_specific_cfg;
} __packed; /* PHY_CONFIGURATION_CMD_API_S_VER_3 */
/*
* enum iwl_dc2dc_config_id - flag ids
*


@ -618,7 +618,7 @@ struct iwl_wowlan_status_v6 {
* @wake_packet_bufsize: wakeup packet buffer size
* @wake_packet: wakeup packet
*/
struct iwl_wowlan_status {
struct iwl_wowlan_status_v7 {
struct iwl_wowlan_gtk_status gtk[WOWLAN_GTK_KEYS_NUM];
struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
__le64 replay_ctr;
@ -634,6 +634,43 @@ struct iwl_wowlan_status {
u8 wake_packet[]; /* can be truncated from _length to _bufsize */
} __packed; /* WOWLAN_STATUSES_API_S_VER_7 */
/**
* struct iwl_wowlan_status - WoWLAN status
* @gtk: GTK data
* @igtk: IGTK data
* @replay_ctr: GTK rekey replay counter
* @pattern_number: number of the matched pattern
* @non_qos_seq_ctr: non-QoS sequence counter to use next
* @qos_seq_ctr: QoS sequence counters to use next
* @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
* @num_of_gtk_rekeys: number of GTK rekeys
* @transmitted_ndps: number of transmitted neighbor discovery packets
* @received_beacons: number of received beacons
* @wake_packet_length: wakeup packet length
* @wake_packet_bufsize: wakeup packet buffer size
* @tid_tear_down: bit mask of tids whose BA sessions were closed
* in suspend state
* @reserved: unused
* @wake_packet: wakeup packet
*/
struct iwl_wowlan_status {
struct iwl_wowlan_gtk_status gtk[WOWLAN_GTK_KEYS_NUM];
struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
__le64 replay_ctr;
__le16 pattern_number;
__le16 non_qos_seq_ctr;
__le16 qos_seq_ctr[8];
__le32 wakeup_reasons;
__le32 num_of_gtk_rekeys;
__le32 transmitted_ndps;
__le32 received_beacons;
__le32 wake_packet_length;
__le32 wake_packet_bufsize;
u8 tid_tear_down;
u8 reserved[3];
u8 wake_packet[]; /* can be truncated from _length to _bufsize */
} __packed; /* WOWLAN_STATUSES_API_S_VER_9 */
static inline u8 iwlmvm_wowlan_gtk_idx(struct iwl_wowlan_gtk_status *gtk)
{
return gtk->key_flags & IWL_WOWLAN_GTK_IDX_MASK;


@ -80,6 +80,11 @@ enum iwl_regulatory_and_nvm_subcmd_ids {
* response is &struct iwl_nvm_get_info_rsp
*/
NVM_GET_INFO = 0x2,
/**
* @TAS_CONFIG: &struct iwl_tas_config_cmd
*/
TAS_CONFIG = 0x3,
};
/**
@ -431,4 +436,14 @@ enum iwl_mcc_source {
MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11,
};
#define IWL_TAS_BLACK_LIST_MAX 16
/**
* struct iwl_tas_config_cmd - configures the TAS
* @black_list_size: size of relevant field in black_list_array
* @black_list_array: black list countries (without TAS)
*/
struct iwl_tas_config_cmd {
__le32 black_list_size;
__le32 black_list_array[IWL_TAS_BLACK_LIST_MAX];
} __packed; /* TAS_CONFIG_CMD_API_S_VER_2 */
#endif /* __iwl_fw_api_nvm_reg_h__ */


@ -1050,20 +1050,6 @@ struct iwl_scan_req_params_v12 {
struct iwl_scan_probe_params_v3 probe_params;
} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_12 */
/**
* struct iwl_scan_req_params_v13
* @general_params: &struct iwl_scan_general_params_v10
* @channel_params: &struct iwl_scan_channel_params_v4
* @periodic_params: &struct iwl_scan_periodic_parms_v1
* @probe_params: &struct iwl_scan_probe_params_v4
*/
struct iwl_scan_req_params_v13 {
struct iwl_scan_general_params_v10 general_params;
struct iwl_scan_channel_params_v4 channel_params;
struct iwl_scan_periodic_parms_v1 periodic_params;
struct iwl_scan_probe_params_v4 probe_params;
} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_13 */
/**
* struct iwl_scan_req_params_v14
* @general_params: &struct iwl_scan_general_params_v10
@ -1090,18 +1076,6 @@ struct iwl_scan_req_umac_v12 {
struct iwl_scan_req_params_v12 scan_params;
} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_12 */
/**
* struct iwl_scan_req_umac_v13
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @ooc_priority: out of channel priority - &enum iwl_scan_priority
* @scan_params: scan parameters
*/
struct iwl_scan_req_umac_v13 {
__le32 uid;
__le32 ooc_priority;
struct iwl_scan_req_params_v13 scan_params;
} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_13 */
/**
* struct iwl_scan_req_umac_v14
* @uid: scan id, &enum iwl_umac_scan_uid_offsets


@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2019 Intel Deutschland GmbH
* Copyright(c) 2012 - 2014, 2019 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -28,10 +27,9 @@
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2019 Intel Deutschland GmbH
* Copyright(c) 2012 - 2014, 2019 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -68,6 +66,12 @@
#define SOC_CONFIG_CMD_FLAGS_DISCRETE BIT(0)
#define SOC_CONFIG_CMD_FLAGS_LOW_LATENCY BIT(1)
#define SOC_FLAGS_LTR_APPLY_DELAY_MASK 0xc
#define SOC_FLAGS_LTR_APPLY_DELAY_NONE 0
#define SOC_FLAGS_LTR_APPLY_DELAY_200 1
#define SOC_FLAGS_LTR_APPLY_DELAY_2500 2
#define SOC_FLAGS_LTR_APPLY_DELAY_1820 3
/**
* struct iwl_soc_configuration_cmd - Set device stabilization latency
*
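The new SOC_FLAGS_LTR_APPLY_DELAY_* selectors live in a two-bit field of the SoC configuration command flags (mask 0xc, so bits 2 and 3), alongside the existing discrete and low-latency bits; the mvm/fw.c hunk later in this commit packs the value with le32_encode_bits(). A tiny sketch of that packing, with the endianness conversion left out for brevity:

#include <stdint.h>
#include <stdio.h>

#define SOC_CONFIG_CMD_FLAGS_DISCRETE    (1u << 0)
#define SOC_CONFIG_CMD_FLAGS_LOW_LATENCY (1u << 1)
#define SOC_FLAGS_LTR_APPLY_DELAY_MASK   0xcu         /* bits 2..3 */
#define SOC_FLAGS_LTR_APPLY_DELAY_1820   3u

/* Mask 0xc means a shift of 2; le32_encode_bits() does this plus the
 * CPU-to-little-endian conversion in the driver. */
static uint32_t encode_ltr_delay(uint32_t delay)
{
	return (delay << 2) & SOC_FLAGS_LTR_APPLY_DELAY_MASK;
}

int main(void)
{
	uint32_t flags = SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;

	flags |= encode_ltr_delay(SOC_FLAGS_LTR_APPLY_DELAY_1820);
	printf("soc cmd flags: 0x%x\n", flags);       /* 0xe: low latency + 1820us delay */
	return 0;
}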


@ -245,32 +245,6 @@ enum iwl_sta_sleep_flag {
#define STA_KEY_LEN_WEP40 (5)
#define STA_KEY_LEN_WEP104 (13)
/**
* struct iwl_mvm_keyinfo - key information
* @key_flags: type &enum iwl_sta_key_flag
* @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
* @reserved1: reserved
* @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
* @key_offset: key offset in the fw's key table
* @reserved2: reserved
* @key: 16-byte unicast decryption key
* @tx_secur_seq_cnt: initial RSC / PN needed for replay check
* @hw_tkip_mic_rx_key: byte: MIC Rx Key - used for TKIP only
* @hw_tkip_mic_tx_key: byte: MIC Tx Key - used for TKIP only
*/
struct iwl_mvm_keyinfo {
__le16 key_flags;
u8 tkip_rx_tsc_byte2;
u8 reserved1;
__le16 tkip_rx_ttak[5];
u8 key_offset;
u8 reserved2;
u8 key[16];
__le64 tx_secur_seq_cnt;
__le64 hw_tkip_mic_rx_key;
__le64 hw_tkip_mic_tx_key;
} __packed;
#define IWL_ADD_STA_STATUS_MASK 0xFF
#define IWL_ADD_STA_BAID_VALID_MASK 0x8000
#define IWL_ADD_STA_BAID_MASK 0x7F00


@ -818,7 +818,8 @@ static void iwl_dump_paging(struct iwl_fw_runtime *fwrt,
static struct iwl_fw_error_dump_file *
iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dump_ptrs *fw_error_dump)
struct iwl_fw_dump_ptrs *fw_error_dump,
struct iwl_fwrt_dump_data *data)
{
struct iwl_fw_error_dump_file *dump_file;
struct iwl_fw_error_dump_data *dump_data;
@ -900,15 +901,15 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
}
/* If we only want a monitor dump, reset the file length */
if (fwrt->dump.monitor_only) {
if (data->monitor_only) {
file_len = sizeof(*dump_file) + sizeof(*dump_data) * 2 +
sizeof(*dump_info) + sizeof(*dump_smem_cfg);
}
if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) &&
fwrt->dump.desc)
data->desc)
file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
fwrt->dump.desc->len;
data->desc->len;
dump_file = vzalloc(file_len);
if (!dump_file)
@ -984,19 +985,19 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
iwl_read_radio_regs(fwrt, &dump_data);
if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) &&
fwrt->dump.desc) {
data->desc) {
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
fwrt->dump.desc->len);
data->desc->len);
dump_trig = (void *)dump_data->data;
memcpy(dump_trig, &fwrt->dump.desc->trig_desc,
sizeof(*dump_trig) + fwrt->dump.desc->len);
memcpy(dump_trig, &data->desc->trig_desc,
sizeof(*dump_trig) + data->desc->len);
dump_data = iwl_fw_error_next_data(dump_data);
}
/* In case we only want monitor dump, skip to dump trasport data */
if (fwrt->dump.monitor_only)
if (data->monitor_only)
goto out;
if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) {
@ -2172,7 +2173,21 @@ static u32 iwl_dump_ini_file_gen(struct iwl_fw_runtime *fwrt,
return le32_to_cpu(hdr->file_len);
}
static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt,
const struct iwl_fw_dump_desc **desc)
{
if (desc && *desc != &iwl_dump_desc_assert)
kfree(*desc);
*desc = NULL;
fwrt->dump.lmac_err_id[0] = 0;
if (fwrt->smem_cfg.num_lmacs > 1)
fwrt->dump.lmac_err_id[1] = 0;
fwrt->dump.umac_err_id = 0;
}
static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
struct iwl_fwrt_dump_data *dump_data)
{
struct iwl_fw_dump_ptrs fw_error_dump = {};
struct iwl_fw_error_dump_file *dump_file;
@ -2180,11 +2195,11 @@ static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
u32 file_len;
u32 dump_mask = fwrt->fw->dbg.dump_mask;
dump_file = iwl_fw_error_dump_file(fwrt, &fw_error_dump);
dump_file = iwl_fw_error_dump_file(fwrt, &fw_error_dump, dump_data);
if (!dump_file)
goto out;
return;
if (fwrt->dump.monitor_only)
if (dump_data->monitor_only)
dump_mask &= IWL_FW_ERROR_DUMP_FW_MONITOR;
fw_error_dump.trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask);
@ -2213,9 +2228,6 @@ static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
}
vfree(fw_error_dump.fwrt_ptr);
vfree(fw_error_dump.trans_ptr);
out:
iwl_fw_free_dump_desc(fwrt);
}
static void iwl_dump_ini_list_free(struct list_head *list)
@ -2244,7 +2256,7 @@ static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
u32 file_len = iwl_dump_ini_file_gen(fwrt, dump_data, &dump_list);
if (!file_len)
goto out;
return;
sg_dump_data = alloc_sgtable(file_len);
if (sg_dump_data) {
@ -2261,9 +2273,6 @@ static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
GFP_KERNEL);
}
iwl_dump_ini_list_free(&dump_list);
out:
iwl_fw_error_dump_data_free(dump_data);
}
const struct iwl_fw_dump_desc iwl_dump_desc_assert = {
@ -2278,27 +2287,40 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
bool monitor_only,
unsigned int delay)
{
struct iwl_fwrt_wk_data *wk_data;
unsigned long idx;
if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
iwl_fw_free_dump_desc(fwrt);
iwl_fw_free_dump_desc(fwrt, &desc);
return 0;
}
/* use wks[0] since dump flow prior to ini does not need to support
* consecutive triggers collection
/*
* Check there is an available worker.
* ffz return value is undefined if no zero exists,
* so check against ~0UL first.
*/
if (test_and_set_bit(fwrt->dump.wks[0].idx, &fwrt->dump.active_wks))
if (fwrt->dump.active_wks == ~0UL)
return -EBUSY;
if (WARN_ON(fwrt->dump.desc))
iwl_fw_free_dump_desc(fwrt);
idx = ffz(fwrt->dump.active_wks);
if (idx >= IWL_FW_RUNTIME_DUMP_WK_NUM ||
test_and_set_bit(fwrt->dump.wks[idx].idx, &fwrt->dump.active_wks))
return -EBUSY;
wk_data = &fwrt->dump.wks[idx];
if (WARN_ON(wk_data->dump_data.desc))
iwl_fw_free_dump_desc(fwrt, &wk_data->dump_data.desc);
wk_data->dump_data.desc = desc;
wk_data->dump_data.monitor_only = monitor_only;
IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n",
le32_to_cpu(desc->trig_desc.type));
fwrt->dump.desc = desc;
fwrt->dump.monitor_only = monitor_only;
schedule_delayed_work(&fwrt->dump.wks[0].wk, usecs_to_jiffies(delay));
schedule_delayed_work(&wk_data->wk, usecs_to_jiffies(delay));
return 0;
}
@ -2307,26 +2329,40 @@ IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc);
int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt,
enum iwl_fw_dbg_trigger trig_type)
{
int ret;
struct iwl_fw_dump_desc *iwl_dump_error_desc;
if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status))
return -EIO;
iwl_dump_error_desc = kmalloc(sizeof(*iwl_dump_error_desc), GFP_KERNEL);
if (!iwl_dump_error_desc)
return -ENOMEM;
if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
if (trig_type != FW_DBG_TRIGGER_ALIVE_TIMEOUT)
return -EIO;
iwl_dump_error_desc->trig_desc.type = cpu_to_le32(trig_type);
iwl_dump_error_desc->len = 0;
iwl_dbg_tlv_time_point(fwrt,
IWL_FW_INI_TIME_POINT_HOST_ALIVE_TIMEOUT,
NULL);
} else {
struct iwl_fw_dump_desc *iwl_dump_error_desc;
int ret;
ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc, false, 0);
if (ret)
kfree(iwl_dump_error_desc);
else
iwl_trans_sync_nmi(fwrt->trans);
iwl_dump_error_desc =
kmalloc(sizeof(*iwl_dump_error_desc), GFP_KERNEL);
return ret;
if (!iwl_dump_error_desc)
return -ENOMEM;
iwl_dump_error_desc->trig_desc.type = cpu_to_le32(trig_type);
iwl_dump_error_desc->len = 0;
ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc,
false, 0);
if (ret) {
kfree(iwl_dump_error_desc);
return ret;
}
}
iwl_trans_sync_nmi(fwrt->trans);
return 0;
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_error_collect);
@ -2504,14 +2540,14 @@ IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
{
struct iwl_fw_dbg_params params = {0};
struct iwl_fwrt_dump_data *dump_data =
&fwrt->dump.wks[wk_idx].dump_data;
if (!test_bit(wk_idx, &fwrt->dump.active_wks))
return;
if (fwrt->ops && fwrt->ops->fw_running &&
!fwrt->ops->fw_running(fwrt->ops_ctx)) {
IWL_ERR(fwrt, "Firmware not running - cannot dump error\n");
iwl_fw_free_dump_desc(fwrt);
if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status)) {
IWL_ERR(fwrt, "Device is not enabled - cannot dump error\n");
goto out;
}
@ -2527,12 +2563,17 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
if (iwl_trans_dbg_ini_valid(fwrt->trans))
iwl_fw_error_ini_dump(fwrt, &fwrt->dump.wks[wk_idx].dump_data);
else
iwl_fw_error_dump(fwrt);
iwl_fw_error_dump(fwrt, &fwrt->dump.wks[wk_idx].dump_data);
IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection done\n");
iwl_fw_dbg_stop_restart_recording(fwrt, &params, false);
out:
if (iwl_trans_dbg_ini_valid(fwrt->trans))
iwl_fw_error_dump_data_free(dump_data);
else
iwl_fw_free_dump_desc(fwrt, &dump_data->desc);
clear_bit(wk_idx, &fwrt->dump.active_wks);
}
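iwl_fw_dbg_collect_desc() no longer hardwires dump worker 0: it checks active_wks against ~0UL first (ffz() is undefined when every bit is set), then takes the first free slot with ffz() and claims it with test_and_set_bit(), storing the descriptor and monitor_only flag in that worker's dump_data. A single-threaded userspace sketch of the slot-claiming logic, with a plain loop standing in for the kernel bit helpers:

#include <stdio.h>

#define DUMP_WK_NUM 5                 /* stand-in for IWL_FW_RUNTIME_DUMP_WK_NUM */

static unsigned long active_wks;      /* one bit per in-flight dump worker */

static int ffz_ul(unsigned long v)    /* first zero bit; caller ensures one exists */
{
	int i;

	for (i = 0; i < (int)(8 * sizeof(v)); i++)
		if (!(v & (1UL << i)))
			return i;
	return -1;
}

/* Same ordering as the hunk above: reject the all-busy case before ffz(),
 * then bounds-check and (here trivially, without atomics) claim the slot. */
static int claim_free_worker(void)
{
	int idx;

	if (active_wks == ~0UL)
		return -1;                            /* -EBUSY in the driver */

	idx = ffz_ul(active_wks);
	if (idx < 0 || idx >= DUMP_WK_NUM || (active_wks & (1UL << idx)))
		return -1;

	active_wks |= 1UL << idx;                     /* test_and_set_bit() equivalent */
	return idx;
}

int main(void)
{
	active_wks = 0x3;                             /* workers 0 and 1 already busy */
	printf("claimed worker %d\n", claim_free_worker());   /* prints 2 */
	return 0;
}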


@ -98,17 +98,6 @@ struct iwl_fw_dbg_params {
extern const struct iwl_fw_dump_desc iwl_dump_desc_assert;
static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)
{
if (fwrt->dump.desc != &iwl_dump_desc_assert)
kfree(fwrt->dump.desc);
fwrt->dump.desc = NULL;
fwrt->dump.lmac_err_id[0] = 0;
if (fwrt->smem_cfg.num_lmacs > 1)
fwrt->dump.lmac_err_id[1] = 0;
fwrt->dump.umac_err_id = 0;
}
int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
const struct iwl_fw_dump_desc *desc,
bool monitor_only, unsigned int delay);


@ -449,6 +449,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_CS_MODIFY = (__force iwl_ucode_tlv_capa_t)49,
IWL_UCODE_TLV_CAPA_SET_LTR_GEN2 = (__force iwl_ucode_tlv_capa_t)50,
IWL_UCODE_TLV_CAPA_SET_PPAG = (__force iwl_ucode_tlv_capa_t)52,
IWL_UCODE_TLV_CAPA_TAS_CFG = (__force iwl_ucode_tlv_capa_t)53,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD = (__force iwl_ucode_tlv_capa_t)54,
/* set 2 */


@ -0,0 +1,99 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#include "img.h"
u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd)
{
const struct iwl_fw_cmd_version *entry;
unsigned int i;
if (!fw->ucode_capa.cmd_versions ||
!fw->ucode_capa.n_cmd_versions)
return IWL_FW_CMD_VER_UNKNOWN;
entry = fw->ucode_capa.cmd_versions;
for (i = 0; i < fw->ucode_capa.n_cmd_versions; i++, entry++) {
if (entry->group == grp && entry->cmd == cmd)
return entry->cmd_ver;
}
return IWL_FW_CMD_VER_UNKNOWN;
}
EXPORT_SYMBOL_GPL(iwl_fw_lookup_cmd_ver);
u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def)
{
const struct iwl_fw_cmd_version *entry;
unsigned int i;
if (!fw->ucode_capa.cmd_versions ||
!fw->ucode_capa.n_cmd_versions)
return def;
entry = fw->ucode_capa.cmd_versions;
for (i = 0; i < fw->ucode_capa.n_cmd_versions; i++, entry++) {
if (entry->group == grp && entry->cmd == cmd) {
if (entry->notif_ver == IWL_FW_CMD_VER_UNKNOWN)
return def;
return entry->notif_ver;
}
}
return def;
}
EXPORT_SYMBOL_GPL(iwl_fw_lookup_notif_ver);
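The new fw/img.c exports iwl_fw_lookup_cmd_ver() and iwl_fw_lookup_notif_ver(), replacing the inline helper removed from fw/img.h and the mvm-local copy removed from mvm/ops.c later in this commit. Both simply walk the cmd_versions table the firmware provides via TLVs and fall back to IWL_FW_CMD_VER_UNKNOWN or a caller-supplied default. A self-contained sketch of that table walk; the table contents and the numeric sentinel are made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define FW_CMD_VER_UNKNOWN 0xff       /* placeholder sentinel, not the driver's value */

struct fw_cmd_version {               /* shape of struct iwl_fw_cmd_version */
	uint8_t cmd, group, cmd_ver, notif_ver;
};

static uint8_t lookup_cmd_ver(const struct fw_cmd_version *tbl, unsigned int n,
			      uint8_t grp, uint8_t cmd)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (tbl[i].group == grp && tbl[i].cmd == cmd)
			return tbl[i].cmd_ver;
	return FW_CMD_VER_UNKNOWN;    /* caller decides what the fallback means */
}

int main(void)
{
	/* hypothetical table entry: group 0, command 0x6a supported at version 3 */
	const struct fw_cmd_version tbl[] = {
		{ .group = 0, .cmd = 0x6a, .cmd_ver = 3, .notif_ver = FW_CMD_VER_UNKNOWN },
	};
	unsigned int ver = lookup_cmd_ver(tbl, 1, 0, 0x6a);

	printf("command version: %u\n", ver);
	return 0;
}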


@ -313,22 +313,7 @@ iwl_get_ucode_image(const struct iwl_fw *fw, enum iwl_ucode_type ucode_type)
return &fw->img[ucode_type];
}
static inline u8 iwl_mvm_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd)
{
const struct iwl_fw_cmd_version *entry;
unsigned int i;
if (!fw->ucode_capa.cmd_versions ||
!fw->ucode_capa.n_cmd_versions)
return IWL_FW_CMD_VER_UNKNOWN;
entry = fw->ucode_capa.cmd_versions;
for (i = 0; i < fw->ucode_capa.n_cmd_versions; i++, entry++) {
if (entry->group == grp && entry->cmd == cmd)
return entry->cmd_ver;
}
return IWL_FW_CMD_VER_UNKNOWN;
}
u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd);
u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def);
#endif /* __iwl_fw_img_h__ */


@ -98,8 +98,16 @@ struct iwl_fwrt_shared_mem_cfg {
* @fw_pkt: packet received from FW
*/
struct iwl_fwrt_dump_data {
struct iwl_fw_ini_trigger_tlv *trig;
struct iwl_rx_packet *fw_pkt;
union {
struct {
struct iwl_fw_ini_trigger_tlv *trig;
struct iwl_rx_packet *fw_pkt;
};
struct {
const struct iwl_fw_dump_desc *desc;
bool monitor_only;
};
};
};
/**
@ -162,8 +170,6 @@ struct iwl_fw_runtime {
/* debug */
struct {
const struct iwl_fw_dump_desc *desc;
bool monitor_only;
struct iwl_fwrt_wk_data wks[IWL_FW_RUNTIME_DUMP_WK_NUM];
unsigned long active_wks;


@ -5,9 +5,8 @@
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright (C) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
* Copyright(c) 2007 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -27,9 +26,8 @@
*
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright (C) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
* Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -284,6 +282,13 @@ struct iwl_pwr_tx_backoff {
u32 backoff;
};
enum iwl_cfg_trans_ltr_delay {
IWL_CFG_TRANS_LTR_DELAY_NONE = 0,
IWL_CFG_TRANS_LTR_DELAY_200US = 1,
IWL_CFG_TRANS_LTR_DELAY_2500US = 2,
IWL_CFG_TRANS_LTR_DELAY_1820US = 3,
};
/**
* struct iwl_cfg_trans - information needed to start the trans
*
@ -304,6 +309,7 @@ struct iwl_pwr_tx_backoff {
* @mq_rx_supported: multi-queue rx support
* @integrated: discrete or integrated
* @low_latency_xtal: use the low latency xtal if supported
* @ltr_delay: LTR delay parameter, &enum iwl_cfg_trans_ltr_delay.
*/
struct iwl_cfg_trans_params {
const struct iwl_base_params *base_params;
@ -317,7 +323,8 @@ struct iwl_cfg_trans_params {
mq_rx_supported:1,
integrated:1,
low_latency_xtal:1,
bisr_workaround:1;
bisr_workaround:1,
ltr_delay:2;
};
/**
@ -506,9 +513,10 @@ struct iwl_dev_info {
extern const struct iwl_cfg_trans_params iwl9000_trans_cfg;
extern const struct iwl_cfg_trans_params iwl9560_trans_cfg;
extern const struct iwl_cfg_trans_params iwl9560_shared_clk_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_qu_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_qnj_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_qu_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_qu_medium_latency_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_ax200_trans_cfg;
extern const char iwl9162_name[];
extern const char iwl9260_name[];
@ -622,9 +630,12 @@ extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0;
extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0;
extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long;
extern const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0;
extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0;
extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long;
extern const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0;
extern const struct iwl_cfg iwlax211_cfg_snj_gf_a0;
#endif /* CPTCFG_IWLMVM || CPTCFG_IWLFMAC */
#endif /* __IWL_CONFIG_H__ */


@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
* Copyright (C) 2018 - 2019 Intel Corporation
* Copyright (C) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -28,7 +28,7 @@
*
* BSD LICENSE
*
* Copyright (C) 2018 - 2019 Intel Corporation
* Copyright (C) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -170,14 +170,24 @@ static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
if (le32_to_cpu(tlv->length) != sizeof(*alloc) ||
(buf_location != IWL_FW_INI_LOCATION_SRAM_PATH &&
buf_location != IWL_FW_INI_LOCATION_DRAM_PATH))
buf_location != IWL_FW_INI_LOCATION_DRAM_PATH &&
buf_location != IWL_FW_INI_LOCATION_NPK_PATH)) {
IWL_ERR(trans,
"WRT: Invalid allocation TLV\n");
return -EINVAL;
}
if ((buf_location == IWL_FW_INI_LOCATION_SRAM_PATH &&
alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1) ||
(buf_location == IWL_FW_INI_LOCATION_DRAM_PATH &&
(alloc_id == IWL_FW_INI_ALLOCATION_INVALID ||
alloc_id >= IWL_FW_INI_ALLOCATION_NUM))) {
if ((buf_location == IWL_FW_INI_LOCATION_SRAM_PATH ||
buf_location == IWL_FW_INI_LOCATION_NPK_PATH) &&
alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1) {
IWL_ERR(trans,
"WRT: Allocation TLV for SMEM/NPK path must have id %u (current: %u)\n",
IWL_FW_INI_ALLOCATION_ID_DBGC1, alloc_id);
return -EINVAL;
}
if (alloc_id == IWL_FW_INI_ALLOCATION_INVALID ||
alloc_id >= IWL_FW_INI_ALLOCATION_NUM) {
IWL_ERR(trans,
"WRT: Invalid allocation id %u for allocation TLV\n",
alloc_id);
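The allocation-TLV validation above now recognizes three buffer destinations (SRAM, DRAM and the new NPK path), requires allocation id DBGC1 for the SRAM and NPK destinations, and range-checks the id separately, each failure with its own error message. The same decision table condensed into a standalone sketch, with simplified enums standing in for the IWL_FW_INI_* constants:

#include <stdio.h>

enum buf_location { LOC_INVALID, LOC_SRAM, LOC_DRAM, LOC_NPK };      /* stand-ins */
enum alloc_id { ALLOC_INVALID, ALLOC_DBGC1, ALLOC_DBGC2, ALLOC_NUM };

/* Same ordering of checks as the hunk above: location first, then the
 * DBGC1-only rule for SRAM/NPK, then the generic id range check. */
static int validate_alloc_tlv(enum buf_location loc, enum alloc_id id)
{
	if (loc != LOC_SRAM && loc != LOC_DRAM && loc != LOC_NPK)
		return -1;                       /* invalid allocation TLV */
	if ((loc == LOC_SRAM || loc == LOC_NPK) && id != ALLOC_DBGC1)
		return -1;                       /* SMEM/NPK must use DBGC1 */
	if (id == ALLOC_INVALID || id >= ALLOC_NUM)
		return -1;                       /* id out of range */
	return 0;
}

int main(void)
{
	printf("NPK+DBGC1: %d, NPK+DBGC2: %d, DRAM+DBGC2: %d\n",
	       validate_alloc_tlv(LOC_NPK, ALLOC_DBGC1),
	       validate_alloc_tlv(LOC_NPK, ALLOC_DBGC2),
	       validate_alloc_tlv(LOC_DRAM, ALLOC_DBGC2));
	return 0;
}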


@ -1872,10 +1872,6 @@ module_param_named(power_level, iwlwifi_mod_params.power_level, int, 0444);
MODULE_PARM_DESC(power_level,
"default power save level (range from 1 - 5, default: 1)");
module_param_named(fw_monitor, iwlwifi_mod_params.fw_monitor, bool, 0444);
MODULE_PARM_DESC(fw_monitor,
"firmware monitor - to debug FW (default: false - needs lots of memory)");
module_param_named(disable_11ac, iwlwifi_mod_params.disable_11ac, bool, 0444);
MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities (default: false)");


@ -115,7 +115,6 @@ enum iwl_uapsd_disable {
* @nvm_file: specifies a external NVM file
* @uapsd_disable: disable U-APSD, see &enum iwl_uapsd_disable, default =
* IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT
* @fw_monitor: allow to use firmware monitor
* @disable_11ac: disable VHT capabilities, default = false.
* @remove_when_gone: remove an inaccessible device from the PCIe bus.
* @enable_ini: enable new FW debug infratructure (INI TLVs)
@ -135,7 +134,6 @@ struct iwl_mod_params {
int antenna_coupling;
char *nvm_file;
u32 uapsd_disable;
bool fw_monitor;
bool disable_11ac;
/**
* @disable_11ax: disable HE capabilities, default = false


@ -1168,8 +1168,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
band = (ch_idx < NUM_2GHZ_CHANNELS) ?
NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
band = iwl_nl80211_band_from_channel_idx(ch_idx);
center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx],
band);
new_rule = false;


@ -155,5 +155,9 @@
#define IWL_MVM_USE_TWT false
#define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 10
#define IWL_MVM_USE_NSSN_SYNC 0
#define IWL_MVM_PHY_FILTER_CHAIN_A 0
#define IWL_MVM_PHY_FILTER_CHAIN_B 0
#define IWL_MVM_PHY_FILTER_CHAIN_C 0
#define IWL_MVM_PHY_FILTER_CHAIN_D 0
#endif /* __MVM_CONSTANTS_H */


@ -1517,12 +1517,14 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
{
struct iwl_wowlan_status *v7, *status;
struct iwl_wowlan_status_v7 *v7;
struct iwl_wowlan_status *status;
struct iwl_host_cmd cmd = {
.id = WOWLAN_GET_STATUSES,
.flags = CMD_WANT_SKB,
};
int ret, len, status_size;
int ret, len, status_size, data_size;
u8 notif_ver;
lockdep_assert_held(&mvm->mutex);
@ -1532,13 +1534,12 @@ struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
return ERR_PTR(ret);
}
len = iwl_rx_packet_payload_len(cmd.resp_pkt);
if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL)) {
struct iwl_wowlan_status_v6 *v6 = (void *)cmd.resp_pkt->data;
int data_size;
status_size = sizeof(*v6);
len = iwl_rx_packet_payload_len(cmd.resp_pkt);
if (len < status_size) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
@ -1593,23 +1594,33 @@ struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
}
v7 = (void *)cmd.resp_pkt->data;
status_size = sizeof(*v7);
len = iwl_rx_packet_payload_len(cmd.resp_pkt);
notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
WOWLAN_GET_STATUSES, 0);
status_size = sizeof(*status);
if (notif_ver == IWL_FW_CMD_VER_UNKNOWN || notif_ver < 9)
status_size = sizeof(*v7);
if (len < status_size) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
status = ERR_PTR(-EIO);
goto out_free_resp;
}
data_size = ALIGN(le32_to_cpu(v7->wake_packet_bufsize), 4);
if (len != (status_size +
ALIGN(le32_to_cpu(v7->wake_packet_bufsize), 4))) {
if (len != (status_size + data_size)) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
status = ERR_PTR(-EIO);
goto out_free_resp;
}
status = kmemdup(v7, len, GFP_KERNEL);
status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL);
if (!status)
goto out_free_resp;
memcpy(status, v7, status_size);
memcpy(status->wake_packet, (u8 *)v7 + status_size, data_size);
out_free_resp:
iwl_free_resp(&cmd);
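iwl_mvm_send_wowlan_get_status() now handles two fixed-part layouts: when iwl_fw_lookup_notif_ver() reports a WOWLAN_GET_STATUSES version below 9 (or unknown), the response carries the old iwl_wowlan_status_v7 header, otherwise the new one with tid_tear_down. The raw kmemdup() is replaced by a kzalloc() of the newest layout plus the 4-byte-aligned wake packet, copied in two steps. A trimmed userspace sketch of that upgrade copy, keeping only two fields in the stand-in structs:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct status_v7 {                   /* heavily trimmed stand-in for iwl_wowlan_status_v7 */
	uint32_t wakeup_reasons;
	uint32_t wake_packet_bufsize;
	uint8_t  wake_packet[];
};

struct status_v9 {                   /* trimmed stand-in for the new iwl_wowlan_status */
	uint32_t wakeup_reasons;
	uint32_t wake_packet_bufsize;
	uint8_t  tid_tear_down;        /* new in the v9 layout */
	uint8_t  reserved[3];
	uint8_t  wake_packet[];
};

/* Rebuild the newest in-driver layout from an older, shorter notification:
 * copy the common fixed part, then the variable-length wake packet. */
static struct status_v9 *upgrade_v7(const struct status_v7 *v7, size_t resp_len)
{
	size_t data_size = (v7->wake_packet_bufsize + 3) & ~(size_t)3;  /* ALIGN(.., 4) */
	struct status_v9 *out;

	if (resp_len != sizeof(*v7) + data_size)
		return NULL;                                   /* malformed response */

	out = calloc(1, sizeof(*out) + data_size);
	if (!out)
		return NULL;
	memcpy(out, v7, sizeof(*v7));                          /* shared fixed fields */
	memcpy(out->wake_packet, v7->wake_packet, data_size);
	return out;                                            /* tid_tear_down stays 0 */
}

int main(void)
{
	size_t len = sizeof(struct status_v7) + 4;
	struct status_v7 *v7 = calloc(1, len);
	struct status_v9 *v9;

	if (!v7)
		return 1;
	v7->wakeup_reasons = 0x1;
	v7->wake_packet_bufsize = 2;                           /* aligned up to 4 above */
	v9 = upgrade_v7(v7, len);
	if (v9)
		printf("reasons 0x%x, tid_tear_down %u\n",
		       v9->wakeup_reasons, (unsigned)v9->tid_tear_down);
	free(v9);
	free(v7);
	return 0;
}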


@ -508,8 +508,8 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EBUSY;
if (new_api) {
u8 cmd_ver = iwl_mvm_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
TOF_RANGE_REQ_CMD);
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
TOF_RANGE_REQ_CMD);
if (cmd_ver == 8)
err = iwl_mvm_ftm_start_v8(mvm, vif, req);


@ -136,8 +136,8 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
IWL_TOF_RESPONDER_CMD_VALID_STA_ID),
.sta_id = mvmvif->bcast_sta.sta_id,
};
u8 cmd_ver = iwl_mvm_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
TOF_RESPONDER_CONFIG_CMD);
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
TOF_RESPONDER_CONFIG_CMD);
int err;
lockdep_assert_held(&mvm->mutex);


@ -102,9 +102,23 @@ static int iwl_set_soc_latency(struct iwl_mvm *mvm)
if (!mvm->trans->trans_cfg->integrated)
cmd.flags = cpu_to_le32(SOC_CONFIG_CMD_FLAGS_DISCRETE);
if (iwl_mvm_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
SCAN_REQ_UMAC) >= 2 &&
(mvm->trans->trans_cfg->low_latency_xtal))
BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_NONE !=
SOC_FLAGS_LTR_APPLY_DELAY_NONE);
BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_200US !=
SOC_FLAGS_LTR_APPLY_DELAY_200);
BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_2500US !=
SOC_FLAGS_LTR_APPLY_DELAY_2500);
BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_1820US !=
SOC_FLAGS_LTR_APPLY_DELAY_1820);
if (mvm->trans->trans_cfg->ltr_delay != IWL_CFG_TRANS_LTR_DELAY_NONE &&
!WARN_ON(!mvm->trans->trans_cfg->integrated))
cmd.flags |= le32_encode_bits(mvm->trans->trans_cfg->ltr_delay,
SOC_FLAGS_LTR_APPLY_DELAY_MASK);
if (iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
SCAN_REQ_UMAC) >= 2 &&
mvm->trans->trans_cfg->low_latency_xtal)
cmd.flags |= cpu_to_le32(SOC_CONFIG_CMD_FLAGS_LOW_LATENCY);
cmd.latency = cpu_to_le32(mvm->trans->trans_cfg->xtal_latency);
@ -550,10 +564,49 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
return ret;
}
#ifdef CONFIG_ACPI
static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
struct iwl_phy_specific_cfg *phy_filters)
{
/*
* TODO: read specific phy config from BIOS
* ACPI table for this feature has not been defined yet,
* so for now we use hardcoded values.
*/
if (IWL_MVM_PHY_FILTER_CHAIN_A) {
phy_filters->filter_cfg_chain_a =
cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_A);
}
if (IWL_MVM_PHY_FILTER_CHAIN_B) {
phy_filters->filter_cfg_chain_b =
cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_B);
}
if (IWL_MVM_PHY_FILTER_CHAIN_C) {
phy_filters->filter_cfg_chain_c =
cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_C);
}
if (IWL_MVM_PHY_FILTER_CHAIN_D) {
phy_filters->filter_cfg_chain_d =
cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_D);
}
}
#else /* CONFIG_ACPI */
static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
struct iwl_phy_specific_cfg *phy_filters)
{
}
#endif /* CONFIG_ACPI */
static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
struct iwl_phy_cfg_cmd phy_cfg_cmd;
struct iwl_phy_cfg_cmd_v3 phy_cfg_cmd;
enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;
struct iwl_phy_specific_cfg phy_filters = {};
u8 cmd_ver;
size_t cmd_size;
if (iwl_mvm_has_unified_ucode(mvm) &&
!mvm->trans->cfg->tx_with_siso_diversity)
@ -580,11 +633,20 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
phy_cfg_cmd.calib_control.flow_trigger =
mvm->fw->default_calib[ucode_type].flow_trigger;
cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
PHY_CONFIGURATION_CMD);
if (cmd_ver == 3) {
iwl_mvm_phy_filter_init(mvm, &phy_filters);
memcpy(&phy_cfg_cmd.phy_specific_cfg, &phy_filters,
sizeof(struct iwl_phy_specific_cfg));
}
IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
phy_cfg_cmd.phy_cfg);
cmd_size = (cmd_ver == 3) ? sizeof(struct iwl_phy_cfg_cmd_v3) :
sizeof(struct iwl_phy_cfg_cmd_v1);
return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
sizeof(phy_cfg_cmd), &phy_cfg_cmd);
cmd_size, &phy_cfg_cmd);
}
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
@ -931,6 +993,40 @@ static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
return iwl_mvm_ppag_send_cmd(mvm);
}
static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
{
int ret;
struct iwl_tas_config_cmd cmd = {};
int list_size;
BUILD_BUG_ON(ARRAY_SIZE(cmd.black_list_array) <
APCI_WTAS_BLACK_LIST_MAX);
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) {
IWL_DEBUG_RADIO(mvm, "TAS not enabled in FW\n");
return;
}
ret = iwl_acpi_get_tas(&mvm->fwrt, cmd.black_list_array, &list_size);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"TAS table invalid or unavailable. (%d)\n",
ret);
return;
}
if (list_size < 0)
return;
/* list size if TAS enabled can only be non-negative */
cmd.black_list_size = cpu_to_le32((u32)list_size);
ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
TAS_CONFIG),
0, sizeof(cmd), &cmd);
if (ret < 0)
IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
}
#else /* CONFIG_ACPI */
inline int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm,
@ -958,6 +1054,10 @@ static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
{
return 0;
}
static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
{
}
#endif /* CONFIG_ACPI */
void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
@ -1285,6 +1385,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret < 0)
goto error;
iwl_mvm_tas_init(mvm);
iwl_mvm_leds_sync(mvm);
IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
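iwl_send_phy_cfg_cmd() now always fills the v3 command layout (the v1 fields plus the iwl_phy_specific_cfg block introduced in the fw/api/phy.h hunk earlier), but only transmits the larger size when iwl_fw_lookup_cmd_ver() reports PHY_CONFIGURATION_CMD version 3; older firmware gets the same buffer truncated to the v1 size. The chain filter values themselves are still the hardcoded IWL_MVM_PHY_FILTER_CHAIN_* constants, pending an ACPI table definition, as the TODO notes. A compact sketch of the size selection with stand-in structs and a pretend transport:

#include <stdint.h>
#include <stdio.h>

struct calib_ctrl {                   /* stand-in for struct iwl_calib_ctrl */
	uint32_t flow_trigger;
	uint32_t event_trigger;
};

struct phy_cfg_cmd_v1 {               /* shape of iwl_phy_cfg_cmd_v1 */
	uint32_t phy_cfg;
	struct calib_ctrl calib_control;
};

struct phy_cfg_cmd_v3 {               /* v1 plus the per-chain PHY filters */
	uint32_t phy_cfg;
	struct calib_ctrl calib_control;
	uint32_t filter_cfg_chain[4];
};

/* Pretend transport: just report how many bytes would reach the firmware. */
static int send_cmd(const void *payload, size_t len)
{
	(void)payload;
	printf("PHY_CONFIGURATION_CMD payload: %zu bytes\n", len);
	return 0;
}

int main(void)
{
	struct phy_cfg_cmd_v3 cmd = { .phy_cfg = 0x3 };        /* arbitrary example */
	unsigned int cmd_ver = 3;                              /* from the TLV version table */
	size_t cmd_size = (cmd_ver == 3) ? sizeof(struct phy_cfg_cmd_v3)
					 : sizeof(struct phy_cfg_cmd_v1);

	/* The v3 layout begins with the v1 fields, so sending only the first
	 * sizeof(v1) bytes of the same buffer is what keeps older firmware happy. */
	return send_cmd(&cmd, cmd_size);
}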


@ -1264,7 +1264,6 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
cancel_delayed_work_sync(&mvm->scan_timeout_dwork);
iwl_fw_free_dump_desc(&mvm->fwrt);
mutex_lock(&mvm->mutex);
__iwl_mvm_mac_stop(mvm);


@ -2149,8 +2149,8 @@ iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm,
static inline int iwl_umac_scan_get_max_profiles(const struct iwl_fw *fw)
{
u8 ver = iwl_mvm_lookup_cmd_ver(fw, IWL_ALWAYS_LONG_GROUP,
SCAN_OFFLOAD_UPDATE_PROFILES_CMD);
u8 ver = iwl_fw_lookup_cmd_ver(fw, IWL_ALWAYS_LONG_GROUP,
SCAN_OFFLOAD_UPDATE_PROFILES_CMD);
return (ver == IWL_FW_CMD_VER_UNKNOWN || ver < 3) ?
IWL_SCAN_MAX_PROFILES : IWL_SCAN_MAX_PROFILES_V2;
}


@ -505,6 +505,7 @@ static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = {
HCMD_NAME(NVM_ACCESS_COMPLETE),
HCMD_NAME(NVM_GET_INFO),
HCMD_NAME(TAS_CONFIG),
};
static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
@ -612,27 +613,6 @@ static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
.d3_debug_enable = iwl_mvm_d3_debug_enable,
};
static u8 iwl_mvm_lookup_notif_ver(struct iwl_mvm *mvm, u8 grp, u8 cmd, u8 def)
{
const struct iwl_fw_cmd_version *entry;
unsigned int i;
if (!mvm->fw->ucode_capa.cmd_versions ||
!mvm->fw->ucode_capa.n_cmd_versions)
return def;
entry = mvm->fw->ucode_capa.cmd_versions;
for (i = 0; i < mvm->fw->ucode_capa.n_cmd_versions; i++, entry++) {
if (entry->group == grp && entry->cmd == cmd) {
if (entry->notif_ver == IWL_FW_CMD_VER_UNKNOWN)
return def;
return entry->notif_ver;
}
}
return def;
}
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const struct iwl_fw *fw, struct dentry *dbgfs_dir)
@ -745,7 +725,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork);
mvm->cmd_ver.d0i3_resp =
iwl_mvm_lookup_notif_ver(mvm, LEGACY_GROUP, D0I3_END_CMD, 0);
iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, D0I3_END_CMD,
0);
/* we only support version 1 */
if (WARN_ON_ONCE(mvm->cmd_ver.d0i3_resp > 1))
goto out_free;


@ -3740,11 +3740,12 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
}
return scnprintf(buf, bufsz,
"0x%x: %s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s",
"0x%x: %s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s%s",
rate, type, rs_pretty_ant(ant), bw, mcs, nss,
(rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
(rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
(rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
(rate & RATE_HE_DUAL_CARRIER_MODE_MSK) ? "DCM " : "",
(rate & RATE_MCS_BF_MSK) ? "BF " : "");
}


@ -2051,40 +2051,6 @@ static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return 0;
}
static int iwl_mvm_scan_umac_v13(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_scan_params *params, int type,
int uid)
{
struct iwl_scan_req_umac_v13 *cmd = mvm->scan_cmd;
struct iwl_scan_req_params_v13 *scan_p = &cmd->scan_params;
int ret;
u16 gen_flags;
u32 bitmap_ssid = 0;
mvm->scan_uid_status[uid] = type;
cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params));
cmd->uid = cpu_to_le32(uid);
gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
iwl_mvm_scan_umac_fill_general_p_v10(mvm, params, vif,
&scan_p->general_params,
gen_flags);
ret = iwl_mvm_fill_scan_sched_params(params,
scan_p->periodic_params.schedule,
&scan_p->periodic_params.delay);
if (ret)
return ret;
iwl_mvm_scan_umac_fill_probe_p_v4(params, &scan_p->probe_params,
&bitmap_ssid);
iwl_mvm_scan_umac_fill_ch_p_v4(mvm, params, vif,
&scan_p->channel_params, bitmap_ssid);
return 0;
}
static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_scan_params *params, int type,
int uid)
@ -2235,7 +2201,6 @@ struct iwl_scan_umac_handler {
static const struct iwl_scan_umac_handler iwl_scan_umac_handlers[] = {
/* set the newest version first to shorten the list traverse time */
IWL_SCAN_UMAC_HANDLER(14),
IWL_SCAN_UMAC_HANDLER(13),
IWL_SCAN_UMAC_HANDLER(12),
};
@ -2263,8 +2228,8 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
hcmd->id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
scan_ver = iwl_mvm_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
SCAN_REQ_UMAC);
scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
SCAN_REQ_UMAC);
for (i = 0; i < ARRAY_SIZE(iwl_scan_umac_handlers); i++) {
const struct iwl_scan_umac_handler *ver_handler =
@ -2594,7 +2559,6 @@ static int iwl_scan_req_umac_get_size(u8 scan_ver)
{
switch (scan_ver) {
IWL_SCAN_REQ_UMAC_HANDLE_SIZE(14);
IWL_SCAN_REQ_UMAC_HANDLE_SIZE(13);
IWL_SCAN_REQ_UMAC_HANDLE_SIZE(12);
}
@ -2604,8 +2568,8 @@ static int iwl_scan_req_umac_get_size(u8 scan_ver)
int iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
int base_size, tail_size;
u8 scan_ver = iwl_mvm_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
SCAN_REQ_UMAC);
u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
SCAN_REQ_UMAC);
base_size = iwl_scan_req_umac_get_size(scan_ver);
if (base_size)
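With the deprecated v13 scan API gone, the UMAC scan request builder knows only versions 14 and 12. iwl_mvm_build_scan_cmd() asks iwl_fw_lookup_cmd_ver() which SCAN_REQ_UMAC version the firmware speaks and walks the handler table, ordered newest first to shorten the traversal, as the comment in the hunk says. A reduced sketch of that dispatch with placeholder handlers:

#include <stdio.h>

struct scan_umac_handler {            /* shape of struct iwl_scan_umac_handler */
	unsigned int version;
	int (*handler)(int uid);
};

static int scan_umac_v14(int uid) { printf("building v14 scan, uid %d\n", uid); return 0; }
static int scan_umac_v12(int uid) { printf("building v12 scan, uid %d\n", uid); return 0; }

/* Newest version first, as in iwl_scan_umac_handlers[] above. */
static const struct scan_umac_handler handlers[] = {
	{ 14, scan_umac_v14 },
	{ 12, scan_umac_v12 },
};

static int build_scan_cmd(unsigned int fw_scan_ver, int uid)
{
	unsigned int i;

	for (i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++)
		if (handlers[i].version == fw_scan_ver)
			return handlers[i].handler(uid);

	return -1;    /* no versioned handler matched in this sketch */
}

int main(void)
{
	return build_scan_cmd(14, 7);
}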


@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2018 - 2019 Intel Corporation
* Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -18,7 +18,7 @@
*
* BSD LICENSE
*
* Copyright(c) 2018 - 2019 Intel Corporation
* Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -84,32 +84,35 @@ iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans,
fw_mon_cfg = &trans->dbg.fw_mon_cfg[alloc_id];
if (le32_to_cpu(fw_mon_cfg->buf_location) ==
IWL_FW_INI_LOCATION_SRAM_PATH) {
switch (le32_to_cpu(fw_mon_cfg->buf_location)) {
case IWL_FW_INI_LOCATION_SRAM_PATH:
dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL;
IWL_DEBUG_FW(trans,
"WRT: Applying SMEM buffer destination\n");
goto out;
}
if (le32_to_cpu(fw_mon_cfg->buf_location) ==
IWL_FW_INI_LOCATION_DRAM_PATH &&
trans->dbg.fw_mon_ini[alloc_id].num_frags) {
struct iwl_dram_data *frag =
&trans->dbg.fw_mon_ini[alloc_id].frags[0];
dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
"WRT: Applying SMEM buffer destination\n");
break;
case IWL_FW_INI_LOCATION_NPK_PATH:
dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF;
IWL_DEBUG_FW(trans,
"WRT: Applying DRAM destination (alloc_id=%u)\n",
alloc_id);
"WRT: Applying NPK buffer destination\n");
break;
dbg_cfg->hwm_base_addr = cpu_to_le64(frag->physical);
dbg_cfg->hwm_size = cpu_to_le32(frag->size);
case IWL_FW_INI_LOCATION_DRAM_PATH:
if (trans->dbg.fw_mon_ini[alloc_id].num_frags) {
struct iwl_dram_data *frag =
&trans->dbg.fw_mon_ini[alloc_id].frags[0];
dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
dbg_cfg->hwm_base_addr = cpu_to_le64(frag->physical);
dbg_cfg->hwm_size = cpu_to_le32(frag->size);
IWL_DEBUG_FW(trans,
"WRT: Applying DRAM destination (alloc_id=%u, num_frags=%u)\n",
alloc_id,
trans->dbg.fw_mon_ini[alloc_id].num_frags);
}
break;
default:
IWL_ERR(trans, "WRT: Invalid buffer destination\n");
}
out:
if (dbg_flags)
*control_flags |= IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | dbg_flags;


@ -93,6 +93,21 @@ static void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
return _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, phys, 0);
}
static int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
const struct fw_desc *sec,
struct iwl_dram_data *dram)
{
dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent(trans, sec->len,
&dram->physical);
if (!dram->block)
return -ENOMEM;
dram->size = sec->len;
memcpy(dram->block, sec->data, sec->len);
return 0;
}
void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
{
struct iwl_self_init_dram *dram = &trans->init_dram;


@ -524,8 +524,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* Qu devices */
{IWL_PCI_DEVICE(0x02F0, PCI_ANY_ID, iwl_qu_trans_cfg)},
{IWL_PCI_DEVICE(0x06F0, PCI_ANY_ID, iwl_qu_trans_cfg)},
{IWL_PCI_DEVICE(0x34F0, PCI_ANY_ID, iwl_qu_trans_cfg)},
{IWL_PCI_DEVICE(0x3DF0, PCI_ANY_ID, iwl_qu_trans_cfg)},
{IWL_PCI_DEVICE(0x34F0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)},
{IWL_PCI_DEVICE(0x3DF0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)},
{IWL_PCI_DEVICE(0x4DF0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)},
{IWL_PCI_DEVICE(0x43F0, PCI_ANY_ID, iwl_qu_long_latency_trans_cfg)},
{IWL_PCI_DEVICE(0xA0F0, PCI_ANY_ID, iwl_qu_long_latency_trans_cfg)},
@ -539,12 +541,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2725, 0x0310, iwlax210_2ax_cfg_ty_gf_a0)},
{IWL_PCI_DEVICE(0x2725, 0x0510, iwlax210_2ax_cfg_ty_gf_a0)},
{IWL_PCI_DEVICE(0x2725, 0x0A10, iwlax210_2ax_cfg_ty_gf_a0)},
{IWL_PCI_DEVICE(0x2725, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0)},
{IWL_PCI_DEVICE(0x7A70, 0x0090, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7A70, 0x0310, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7A70, 0x0510, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7A70, 0x0A10, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x2725, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0)},
{IWL_PCI_DEVICE(0x2726, 0x0090, iwlax211_cfg_snj_gf_a0)},
{IWL_PCI_DEVICE(0x2726, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0)},
{IWL_PCI_DEVICE(0x2726, 0x0510, iwlax211_cfg_snj_gf_a0)},
{IWL_PCI_DEVICE(0x7A70, 0x0090, iwlax211_2ax_cfg_so_gf_a0_long)},
{IWL_PCI_DEVICE(0x7A70, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0_long)},
{IWL_PCI_DEVICE(0x7A70, 0x0310, iwlax211_2ax_cfg_so_gf_a0_long)},
{IWL_PCI_DEVICE(0x7A70, 0x0510, iwlax211_2ax_cfg_so_gf_a0_long)},
{IWL_PCI_DEVICE(0x7A70, 0x0A10, iwlax211_2ax_cfg_so_gf_a0_long)},
{IWL_PCI_DEVICE(0x7AF0, 0x0090, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7AF0, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0)},
{IWL_PCI_DEVICE(0x7AF0, 0x0310, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7AF0, 0x0510, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7AF0, 0x0A10, iwlax211_2ax_cfg_so_gf_a0)},
@ -657,6 +664,19 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x3DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x3DF0, 0x4244, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x4DF0, 0x0044, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x4DF0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x4DF0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x4DF0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x4DF0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x4DF0, 0x0244, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x4DF0, 0x0310, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x4DF0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
IWL_DEV_INFO(0x4DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
IWL_DEV_INFO(0x4DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x4DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x4DF0, 0x4244, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x2720, 0x0000, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
IWL_DEV_INFO(0x2720, 0x0040, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
IWL_DEV_INFO(0x2720, 0x0044, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
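
Each IWL_PCI_DEVICE() entry above carries a PCI device ID (with a wildcard or exact subsystem ID) and points at a trans-config, while the IWL_DEV_INFO() entries refine the final config by exact device/subsystem pair. The sketch below shows the kind of table walk a probe path could do over iwl_dev_info_table; the helper name and the "first exact match wins, otherwise fall back to the pci_device_id data" rule are assumptions for illustration.

/*
 * Illustrative lookup over a device-info table like the one above.
 * struct iwl_dev_info and iwl_dev_info_table exist in the driver; this
 * helper, its name and its matching rule are assumptions for the sketch.
 */
static const struct iwl_dev_info *
iwl_demo_find_dev_info(u16 device, u16 subdevice)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(iwl_dev_info_table); i++) {
		const struct iwl_dev_info *info = &iwl_dev_info_table[i];

		if (info->device == device && info->subdevice == subdevice)
			return info;
	}

	return NULL;	/* fall back to the pci_device_id driver_data */
}
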

View file

@ -792,22 +792,6 @@ static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
return i;
}
static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
const struct fw_desc *sec,
struct iwl_dram_data *dram)
{
dram->block = dma_alloc_coherent(trans->dev, sec->len,
&dram->physical,
GFP_KERNEL);
if (!dram->block)
return -ENOMEM;
dram->size = sec->len;
memcpy(dram->block, sec->data, sec->len);
return 0;
}
static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
struct iwl_self_init_dram *dram = &trans->init_dram;

View file

@ -70,6 +70,7 @@
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/seq_file.h>
#include "iwl-drv.h"
#include "iwl-trans.h"
@ -1018,21 +1019,8 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
return ret;
}
/* supported for 7000 only for the moment */
if (iwlwifi_mod_params.fw_monitor &&
trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) {
struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
iwl_pcie_alloc_fw_monitor(trans, 0);
if (fw_mon->size) {
iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
fw_mon->physical >> 4);
iwl_write_prph(trans, MON_BUFF_END_ADDR,
(fw_mon->physical + fw_mon->size) >> 4);
}
} else if (iwl_pcie_dbg_on(trans)) {
if (iwl_pcie_dbg_on(trans))
iwl_pcie_apply_destination(trans);
}
iwl_enable_interrupts(trans);
@ -2544,44 +2532,95 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
.llseek = generic_file_llseek, \
};
static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
struct iwl_dbgfs_tx_queue_priv {
struct iwl_trans *trans;
};
struct iwl_dbgfs_tx_queue_state {
loff_t pos;
};
static void *iwl_dbgfs_tx_queue_seq_start(struct seq_file *seq, loff_t *pos)
{
struct iwl_trans *trans = file->private_data;
struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
struct iwl_dbgfs_tx_queue_state *state;
if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
return NULL;
state = kmalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
state->pos = *pos;
return state;
}
static void *iwl_dbgfs_tx_queue_seq_next(struct seq_file *seq,
void *v, loff_t *pos)
{
struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
struct iwl_dbgfs_tx_queue_state *state = v;
*pos = ++state->pos;
if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
return NULL;
return state;
}
static void iwl_dbgfs_tx_queue_seq_stop(struct seq_file *seq, void *v)
{
kfree(v);
}
static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
{
struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
struct iwl_dbgfs_tx_queue_state *state = v;
struct iwl_trans *trans = priv->trans;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq;
char *buf;
int pos = 0;
int cnt;
int ret;
size_t bufsz;
struct iwl_txq *txq = trans_pcie->txq[state->pos];
bufsz = sizeof(char) * 75 *
trans->trans_cfg->base_params->num_of_queues;
seq_printf(seq, "hwq %.3u: used=%d stopped=%d ",
(unsigned int)state->pos,
!!test_bit(state->pos, trans_pcie->queue_used),
!!test_bit(state->pos, trans_pcie->queue_stopped));
if (txq)
seq_printf(seq,
"read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d",
txq->read_ptr, txq->write_ptr,
txq->need_update, txq->frozen,
txq->n_window, txq->ampdu);
else
seq_puts(seq, "(unallocated)");
if (!trans_pcie->txq_memory)
return -EAGAIN;
if (state->pos == trans_pcie->cmd_queue)
seq_puts(seq, " (HCMD)");
seq_puts(seq, "\n");
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf)
return 0;
}
static const struct seq_operations iwl_dbgfs_tx_queue_seq_ops = {
.start = iwl_dbgfs_tx_queue_seq_start,
.next = iwl_dbgfs_tx_queue_seq_next,
.stop = iwl_dbgfs_tx_queue_seq_stop,
.show = iwl_dbgfs_tx_queue_seq_show,
};
static int iwl_dbgfs_tx_queue_open(struct inode *inode, struct file *filp)
{
struct iwl_dbgfs_tx_queue_priv *priv;
priv = __seq_open_private(filp, &iwl_dbgfs_tx_queue_seq_ops,
sizeof(*priv));
if (!priv)
return -ENOMEM;
for (cnt = 0;
cnt < trans->trans_cfg->base_params->num_of_queues;
cnt++) {
txq = trans_pcie->txq[cnt];
pos += scnprintf(buf + pos, bufsz - pos,
"hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
cnt, txq->read_ptr, txq->write_ptr,
!!test_bit(cnt, trans_pcie->queue_used),
!!test_bit(cnt, trans_pcie->queue_stopped),
txq->need_update, txq->frozen,
(cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
}
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
priv->trans = inode->i_private;
return 0;
}
static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
@ -2914,9 +2953,15 @@ static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_READ_WRITE_FILE_OPS(rfkill);
static const struct file_operations iwl_dbgfs_tx_queue_ops = {
.owner = THIS_MODULE,
.open = iwl_dbgfs_tx_queue_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_private,
};
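
The rewritten tx_queue file above drops the single kzalloc'd snapshot buffer in favour of the kernel's seq_file iterator, so the output no longer depends on a guessed 75-bytes-per-queue buffer size and large queue counts paginate naturally. Below is a minimal, self-contained debugfs file built on the same start/next/stop/show pattern; the module name, fixed queue count and printed text are placeholders, none of it taken from the driver.

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

#define DEMO_NUM_QUEUES 8

/* Iterate over queue indices 0..DEMO_NUM_QUEUES-1; the iterator token
 * handed between start/next/show is simply the pos pointer itself. */
static void *demo_seq_start(struct seq_file *seq, loff_t *pos)
{
	return *pos < DEMO_NUM_QUEUES ? pos : NULL;
}

static void *demo_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return *pos < DEMO_NUM_QUEUES ? pos : NULL;
}

static void demo_seq_stop(struct seq_file *seq, void *v)
{
}

static int demo_seq_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "hwq %.3u: (demo entry)\n", (unsigned int)*(loff_t *)v);
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start = demo_seq_start,
	.next  = demo_seq_next,
	.stop  = demo_seq_stop,
	.show  = demo_seq_show,
};

static int demo_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &demo_seq_ops);
}

static const struct file_operations demo_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static struct dentry *demo_dir;

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("seqfile_demo", NULL);
	debugfs_create_file("tx_queue", 0400, demo_dir, NULL, &demo_fops);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The driver additionally carries its iwl_trans pointer per open file via __seq_open_private()/seq_release_private(); the sketch has no per-open state, so plain seq_open()/seq_release() suffice.
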
static const struct file_operations iwl_dbgfs_monitor_data_ops = {
.read = iwl_dbgfs_monitor_data_read,

View file

@ -90,9 +90,7 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
struct iwl_txq *txq, u16 byte_cnt,
int num_tbs)
{
struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
u8 filled_tfd_size, num_fetch_chunks;
u16 len = byte_cnt;
@ -102,7 +100,7 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
return;
filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
num_tbs * sizeof(struct iwl_tfh_tb);
num_tbs * sizeof(struct iwl_tfh_tb);
/*
* filled_tfd_size contains the number of filled bytes in the TFD.
* Dividing it by 64 will give the number of chunks to fetch
@ -114,12 +112,16 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
/* Starting from AX210, the HW expects bytes */
WARN_ON(trans_pcie->bc_table_dword);
WARN_ON(len > 0x3FFF);
bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
} else {
struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
/* Before AX210, the HW expects DW */
WARN_ON(!trans_pcie->bc_table_dword);
len = DIV_ROUND_UP(len, 4);
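
On AX210 and later, the byte-count entry built above packs the frame length in bytes into the low 14 bits (hence the WARN_ON(len > 0x3FFF)) and the number of 64-byte fetch chunks, minus one, into the top two bits. A small worked example of that encoding follows; the frame length, TB count and the TFD header/TB sizes are made-up values for illustration, not the driver's real structure sizes.

/*
 * Worked example of the AX210 byte-count encoding above.  The TFD
 * header and per-TB sizes are stand-in values chosen for the
 * illustration, not taken from the driver headers.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int byte_cnt = 1500;	/* frame length in bytes (example) */
	unsigned int num_tbs = 3;	/* TBs filled in the TFD (example) */
	unsigned int tfd_hdr_size = 8;	/* assumed offsetof(iwl_tfh_tfd, tbs) */
	unsigned int tb_size = 12;	/* assumed sizeof(struct iwl_tfh_tb) */

	unsigned int filled_tfd_size = tfd_hdr_size + num_tbs * tb_size;
	/* DIV_ROUND_UP(filled_tfd_size, 64) - 1, as in the driver */
	unsigned int num_fetch_chunks = (filled_tfd_size + 63) / 64 - 1;
	/* low 14 bits: length in bytes; top 2 bits: fetch chunks */
	uint16_t bc_ent = (uint16_t)(byte_cnt | (num_fetch_chunks << 14));

	printf("filled_tfd_size=%u chunks=%u bc_ent=0x%04x\n",
	       filled_tfd_size, num_fetch_chunks, bc_ent);
	return 0;
}
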