Merge tag 'wireless-drivers-next-for-davem-2015-03-06' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Major changes:

brcmfmac:

* SDIO improvements
* add a debugfs file so users can provide us all the revinfo we could
  ask for

iwlwifi:

* add triggers for firmware dump collection
* remove support for -9.ucode
* new statistics API
* rate control improvements

ath9k:

* add per-vif TX power capability
* BT coexistence fixes

ath10k:

* qca6174: enable STA transmit beamforming (TxBF) support
* disable multi-vif power save by default

bcma:

* enable support for PCIe Gen 2 host devices

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2015-03-06 15:46:08 -05:00
commit 28c0f02ffe
137 changed files with 3309 additions and 2345 deletions

@@ -1,6 +1,6 @@
 config BCMA_POSSIBLE
 	bool
-	depends on HAS_IOMEM && HAS_DMA
+	depends on HAS_IOMEM && HAS_DMA && PCI
 	default y
 
 menu "Broadcom specific AMBA"
@@ -26,6 +26,7 @@ config BCMA_HOST_PCI_POSSIBLE
 config BCMA_HOST_PCI
 	bool "Support for BCMA on PCI-host bus"
 	depends on BCMA_HOST_PCI_POSSIBLE
+	select BCMA_DRIVER_PCI
 	default y
 
 config BCMA_DRIVER_PCI_HOSTMODE
@@ -44,6 +45,22 @@ config BCMA_HOST_SOC
 	  If unsure, say N
 
+# TODO: make it depend on PCI when ready
+config BCMA_DRIVER_PCI
+	bool
+	default y
+	help
+	  BCMA bus may have many versions of PCIe core. This driver
+	  supports:
+	  1) PCIe core working in clientmode
+	  2) PCIe Gen 2 clientmode core
+
+	  In general PCIe (Gen 2) clientmode core is required on PCIe
+	  hosted buses. It's responsible for initialization and basic
+	  hardware management.
+	  This driver is also prerequisite for a hostmode PCIe core
+	  support.
+
 config BCMA_DRIVER_MIPS
 	bool "BCMA Broadcom MIPS core driver"
 	depends on BCMA && MIPS

@@ -3,8 +3,8 @@ bcma-y += driver_chipcommon.o driver_chipcommon_pmu.o
 bcma-y += driver_chipcommon_b.o
 bcma-$(CONFIG_BCMA_SFLASH) += driver_chipcommon_sflash.o
 bcma-$(CONFIG_BCMA_NFLASH) += driver_chipcommon_nflash.o
-bcma-y += driver_pci.o
-bcma-y += driver_pcie2.o
+bcma-$(CONFIG_BCMA_DRIVER_PCI) += driver_pci.o
+bcma-$(CONFIG_BCMA_DRIVER_PCI) += driver_pcie2.o
 bcma-$(CONFIG_BCMA_DRIVER_PCI_HOSTMODE) += driver_pci_host.o
 bcma-$(CONFIG_BCMA_DRIVER_MIPS) += driver_mips.o
 bcma-$(CONFIG_BCMA_DRIVER_GMAC_CMN) += driver_gmac_cmn.o

@@ -26,6 +26,7 @@ bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value,
 		     int timeout);
 void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core);
 void bcma_init_bus(struct bcma_bus *bus);
+void bcma_unregister_cores(struct bcma_bus *bus);
 int bcma_bus_register(struct bcma_bus *bus);
 void bcma_bus_unregister(struct bcma_bus *bus);
 int __init bcma_bus_early_register(struct bcma_bus *bus);
@@ -42,6 +43,9 @@ int bcma_bus_scan(struct bcma_bus *bus);
 int bcma_sprom_get(struct bcma_bus *bus);
 
 /* driver_chipcommon.c */
+void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
+void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
+void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
 #ifdef CONFIG_BCMA_DRIVER_MIPS
 void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
 extern struct platform_device bcma_pflash_dev;
@@ -52,6 +56,8 @@ int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb);
 void bcma_core_chipcommon_b_free(struct bcma_drv_cc_b *ccb);
 
 /* driver_chipcommon_pmu.c */
+void bcma_pmu_early_init(struct bcma_drv_cc *cc);
+void bcma_pmu_init(struct bcma_drv_cc *cc);
 u32 bcma_pmu_get_alp_clock(struct bcma_drv_cc *cc);
 u32 bcma_pmu_get_cpu_clock(struct bcma_drv_cc *cc);
@@ -101,6 +107,14 @@ static inline void __exit bcma_host_soc_unregister_driver(void)
 /* driver_pci.c */
 u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address);
+void bcma_core_pci_early_init(struct bcma_drv_pci *pc);
+void bcma_core_pci_init(struct bcma_drv_pci *pc);
+void bcma_core_pci_up(struct bcma_drv_pci *pc);
+void bcma_core_pci_down(struct bcma_drv_pci *pc);
+
+/* driver_pcie2.c */
+void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2);
+void bcma_core_pcie2_up(struct bcma_drv_pcie2 *pcie2);
 
 extern int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc);
@@ -117,6 +131,39 @@ static inline void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
 }
 #endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */
 
+/**************************************************
+ * driver_mips.c
+ **************************************************/
+
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+unsigned int bcma_core_mips_irq(struct bcma_device *dev);
+void bcma_core_mips_early_init(struct bcma_drv_mips *mcore);
+void bcma_core_mips_init(struct bcma_drv_mips *mcore);
+#else
+static inline unsigned int bcma_core_mips_irq(struct bcma_device *dev)
+{
+	return 0;
+}
+static inline void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
+{
+}
+static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore)
+{
+}
+#endif
+
+/**************************************************
+ * driver_gmac_cmn.c
+ **************************************************/
+
+#ifdef CONFIG_BCMA_DRIVER_GMAC_CMN
+void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc);
+#else
+static inline void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc)
+{
+}
+#endif
+
 #ifdef CONFIG_BCMA_DRIVER_GPIO
 /* driver_gpio.c */
 int bcma_gpio_init(struct bcma_drv_cc *cc);

@@ -76,7 +76,7 @@ static void bcma_gpio_free(struct gpio_chip *chip, unsigned gpio)
 	bcma_chipco_gpio_pullup(cc, 1 << gpio, 0);
 }
 
-#if IS_BUILTIN(CONFIG_BCM47XX)
+#if IS_BUILTIN(CONFIG_BCM47XX) || IS_BUILTIN(CONFIG_ARCH_BCM_5301X)
 static int bcma_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
 {
 	struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
@@ -215,7 +215,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
 	chip->set = bcma_gpio_set_value;
 	chip->direction_input = bcma_gpio_direction_input;
 	chip->direction_output = bcma_gpio_direction_output;
-#if IS_BUILTIN(CONFIG_BCM47XX)
+#if IS_BUILTIN(CONFIG_BCM47XX) || IS_BUILTIN(CONFIG_ARCH_BCM_5301X)
 	chip->to_irq = bcma_gpio_to_irq;
 #endif
 #if IS_BUILTIN(CONFIG_OF)

@@ -282,21 +282,21 @@ void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
 }
 EXPORT_SYMBOL_GPL(bcma_core_pci_power_save);
 
-int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
+int bcma_core_pci_irq_ctl(struct bcma_bus *bus, struct bcma_device *core,
 			  bool enable)
 {
 	struct pci_dev *pdev;
 	u32 coremask, tmp;
 	int err = 0;
 
-	if (!pc || core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
+	if (bus->hosttype != BCMA_HOSTTYPE_PCI) {
 		/* This bcma device is not on a PCI host-bus. So the IRQs are
 		 * not routed through the PCI core.
 		 * So we must not enable routing through the PCI core. */
 		goto out;
 	}
 
-	pdev = pc->core->bus->host_pci;
+	pdev = bus->host_pci;
 
 	err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
 	if (err)
@@ -328,28 +328,12 @@ static void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
 	bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG);
 }
 
-void bcma_core_pci_up(struct bcma_bus *bus)
+void bcma_core_pci_up(struct bcma_drv_pci *pc)
 {
-	struct bcma_drv_pci *pc;
-
-	if (bus->hosttype != BCMA_HOSTTYPE_PCI)
-		return;
-
-	pc = &bus->drv_pci[0];
-
 	bcma_core_pci_extend_L1timer(pc, true);
 }
-EXPORT_SYMBOL_GPL(bcma_core_pci_up);
 
-void bcma_core_pci_down(struct bcma_bus *bus)
+void bcma_core_pci_down(struct bcma_drv_pci *pc)
 {
-	struct bcma_drv_pci *pc;
-
-	if (bus->hosttype != BCMA_HOSTTYPE_PCI)
-		return;
-
-	pc = &bus->drv_pci[0];
-
 	bcma_core_pci_extend_L1timer(pc, false);
 }
-EXPORT_SYMBOL_GPL(bcma_core_pci_down);

@@ -11,6 +11,7 @@
 #include "bcma_private.h"
 #include <linux/pci.h>
+#include <linux/slab.h>
 #include <linux/export.h>
 #include <linux/bcma/bcma.h>
 #include <asm/paccess.h>

@@ -10,6 +10,7 @@
 #include "bcma_private.h"
 #include <linux/bcma/bcma.h>
+#include <linux/pci.h>
 
 /**************************************************
  * R/W ops.
@@ -156,14 +157,23 @@ static void pciedev_reg_pm_clk_period(struct bcma_drv_pcie2 *pcie2)
 
 void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2)
 {
-	struct bcma_chipinfo *ci = &pcie2->core->bus->chipinfo;
+	struct bcma_bus *bus = pcie2->core->bus;
+	struct bcma_chipinfo *ci = &bus->chipinfo;
 	u32 tmp;
 
 	tmp = pcie2_read32(pcie2, BCMA_CORE_PCIE2_SPROM(54));
 	if ((tmp & 0xe) >> 1 == 2)
 		bcma_core_pcie2_cfg_write(pcie2, 0x4e0, 0x17);
 
-	/* TODO: Do we need pcie_reqsize? */
+	switch (bus->chipinfo.id) {
+	case BCMA_CHIP_ID_BCM4360:
+	case BCMA_CHIP_ID_BCM4352:
+		pcie2->reqsize = 1024;
+		break;
+	default:
+		pcie2->reqsize = 128;
+		break;
+	}
 
 	if (ci->id == BCMA_CHIP_ID_BCM4360 && ci->rev > 3)
 		bcma_core_pcie2_war_delay_perst_enab(pcie2, true);
@@ -173,3 +183,18 @@ void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2)
 	pciedev_crwlpciegen2_180(pcie2);
 	pciedev_crwlpciegen2_182(pcie2);
 }
+
+/**************************************************
+ * Runtime ops.
+ **************************************************/
+
+void bcma_core_pcie2_up(struct bcma_drv_pcie2 *pcie2)
+{
+	struct bcma_bus *bus = pcie2->core->bus;
+	struct pci_dev *dev = bus->host_pci;
+	int err;
+
+	err = pcie_set_readrq(dev, pcie2->reqsize);
+	if (err)
+		bcma_err(bus, "Error setting PCI_EXP_DEVCTL_READRQ: %d\n", err);
+}

@@ -213,16 +213,26 @@ static int bcma_host_pci_probe(struct pci_dev *dev,
 	/* Initialize struct, detect chip */
 	bcma_init_bus(bus);
 
+	/* Scan bus to find out generation of PCIe core */
+	err = bcma_bus_scan(bus);
+	if (err)
+		goto err_pci_unmap_mmio;
+
+	if (bcma_find_core(bus, BCMA_CORE_PCIE2))
+		bus->host_is_pcie2 = true;
+
 	/* Register */
 	err = bcma_bus_register(bus);
 	if (err)
-		goto err_pci_unmap_mmio;
+		goto err_unregister_cores;
 
 	pci_set_drvdata(dev, bus);
 
 out:
 	return err;
 
+err_unregister_cores:
+	bcma_unregister_cores(bus);
 err_pci_unmap_mmio:
 	pci_iounmap(dev, bus->mmio);
 err_pci_release_regions:
@@ -283,9 +293,12 @@ static const struct pci_device_id bcma_pci_bridge_tbl[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4358) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4360) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4365) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a0) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43b1) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43227) },	/* 0xa8db, BCM43217 (sic!) */
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43228) },	/* 0xa8dc */
@@ -310,3 +323,31 @@ void __exit bcma_host_pci_exit(void)
 {
 	pci_unregister_driver(&bcma_pci_bridge_driver);
 }
+
+/**************************************************
+ * Runtime ops for drivers.
+ **************************************************/
+
+/* See also pcicore_up */
+void bcma_host_pci_up(struct bcma_bus *bus)
+{
+	if (bus->hosttype != BCMA_HOSTTYPE_PCI)
+		return;
+
+	if (bus->host_is_pcie2)
+		bcma_core_pcie2_up(&bus->drv_pcie2);
+	else
+		bcma_core_pci_up(&bus->drv_pci[0]);
+}
+EXPORT_SYMBOL_GPL(bcma_host_pci_up);
+
+/* See also pcicore_down */
+void bcma_host_pci_down(struct bcma_bus *bus)
+{
+	if (bus->hosttype != BCMA_HOSTTYPE_PCI)
+		return;
+
+	if (!bus->host_is_pcie2)
+		bcma_core_pci_down(&bus->drv_pci[0]);
+}
+EXPORT_SYMBOL_GPL(bcma_host_pci_down);

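A minimal sketch of how a BCMA-based wireless driver might call the two helpers added above instead of using bcma_core_pci_up()/bcma_core_pci_down() directly; the driver functions below are hypothetical and only illustrate the intended call sites, they are not part of this changeset:

	/* Illustrative caller only; not from this patch set. */
	#include <linux/bcma/bcma.h>

	static int example_wl_core_start(struct bcma_device *core)
	{
		/* Picks the PCIe Gen 1 or Gen 2 core based on bus->host_is_pcie2. */
		bcma_host_pci_up(core->bus);

		/* ... program the wireless core here ... */
		return 0;
	}

	static void example_wl_core_stop(struct bcma_device *core)
	{
		bcma_host_pci_down(core->bus);
	}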
@@ -363,7 +363,7 @@ static int bcma_register_devices(struct bcma_bus *bus)
 	return 0;
 }
 
-static void bcma_unregister_cores(struct bcma_bus *bus)
+void bcma_unregister_cores(struct bcma_bus *bus)
 {
 	struct bcma_device *core, *tmp;

@@ -39,7 +39,7 @@ struct ath10k_ce_pipe;
 #define CE_DESC_FLAGS_GATHER         (1 << 0)
 #define CE_DESC_FLAGS_BYTE_SWAP      (1 << 1)
 #define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
-#define CE_DESC_FLAGS_META_DATA_LSB  3
+#define CE_DESC_FLAGS_META_DATA_LSB  2
 
 struct ce_desc {
 	__le32 addr;

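The one-line change above aligns the shift with the mask: CE_DESC_FLAGS_META_DATA_MASK is 0xFFFC, whose lowest set bit is bit 2, so extracting the metadata field must shift right by 2, not 3. A small self-contained illustration (plain C, not taken from the driver):

	#include <stdint.h>

	/* Metadata occupies bits 2..15 of the flags word (mask 0xFFFC). */
	static inline uint16_t ce_desc_meta_data(uint16_t flags)
	{
		return (uint16_t)((flags & 0xFFFC) >> 2);
	}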
@@ -436,16 +436,16 @@ static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode)
 
 static void ath10k_core_free_firmware_files(struct ath10k *ar)
 {
-	if (ar->board && !IS_ERR(ar->board))
+	if (!IS_ERR(ar->board))
 		release_firmware(ar->board);
 
-	if (ar->otp && !IS_ERR(ar->otp))
+	if (!IS_ERR(ar->otp))
 		release_firmware(ar->otp);
 
-	if (ar->firmware && !IS_ERR(ar->firmware))
+	if (!IS_ERR(ar->firmware))
 		release_firmware(ar->firmware);
 
-	if (ar->cal_file && !IS_ERR(ar->cal_file))
+	if (!IS_ERR(ar->cal_file))
 		release_firmware(ar->cal_file);
 
 	ar->board = NULL;

@@ -159,6 +159,25 @@ struct ath10k_fw_stats_peer {
 	u32 peer_rx_rate; /* 10x only */
 };
 
+struct ath10k_fw_stats_vdev {
+	struct list_head list;
+
+	u32 vdev_id;
+	u32 beacon_snr;
+	u32 data_snr;
+	u32 num_tx_frames[4];
+	u32 num_rx_frames;
+	u32 num_tx_frames_retries[4];
+	u32 num_tx_frames_failures[4];
+	u32 num_rts_fail;
+	u32 num_rts_success;
+	u32 num_rx_err;
+	u32 num_rx_discard;
+	u32 num_tx_not_acked;
+	u32 tx_rate_history[10];
+	u32 beacon_rssi_history[10];
+};
+
 struct ath10k_fw_stats_pdev {
 	struct list_head list;
@@ -220,6 +239,7 @@ struct ath10k_fw_stats_pdev {
 struct ath10k_fw_stats {
 	struct list_head pdevs;
+	struct list_head vdevs;
 	struct list_head peers;
 };
@@ -288,6 +308,7 @@ struct ath10k_vif {
 	bool is_started;
 	bool is_up;
 	bool spectral_enabled;
+	bool ps;
 	u32 aid;
 	u8 bssid[ETH_ALEN];
@@ -413,6 +434,12 @@ enum ath10k_fw_features {
 	 */
 	ATH10K_FW_FEATURE_WMI_10_2 = 4,
 
+	/* Some firmware revisions lack proper multi-interface client powersave
+	 * implementation. Enabling PS could result in connection drops,
+	 * traffic stalls, etc.
+	 */
+	ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT = 5,
+
 	/* keep last */
 	ATH10K_FW_FEATURE_COUNT,
 };

@@ -243,6 +243,16 @@ static void ath10k_debug_fw_stats_pdevs_free(struct list_head *head)
 	}
 }
 
+static void ath10k_debug_fw_stats_vdevs_free(struct list_head *head)
+{
+	struct ath10k_fw_stats_vdev *i, *tmp;
+
+	list_for_each_entry_safe(i, tmp, head, list) {
+		list_del(&i->list);
+		kfree(i);
+	}
+}
+
 static void ath10k_debug_fw_stats_peers_free(struct list_head *head)
 {
 	struct ath10k_fw_stats_peer *i, *tmp;
@@ -258,6 +268,7 @@ static void ath10k_debug_fw_stats_reset(struct ath10k *ar)
 	spin_lock_bh(&ar->data_lock);
 	ar->debug.fw_stats_done = false;
 	ath10k_debug_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs);
+	ath10k_debug_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
 	ath10k_debug_fw_stats_peers_free(&ar->debug.fw_stats.peers);
 	spin_unlock_bh(&ar->data_lock);
 }
@@ -273,14 +284,27 @@ static size_t ath10k_debug_fw_stats_num_peers(struct list_head *head)
 	return num;
 }
 
+static size_t ath10k_debug_fw_stats_num_vdevs(struct list_head *head)
+{
+	struct ath10k_fw_stats_vdev *i;
+	size_t num = 0;
+
+	list_for_each_entry(i, head, list)
+		++num;
+
+	return num;
+}
+
 void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct ath10k_fw_stats stats = {};
 	bool is_start, is_started, is_end;
 	size_t num_peers;
+	size_t num_vdevs;
 	int ret;
 
 	INIT_LIST_HEAD(&stats.pdevs);
+	INIT_LIST_HEAD(&stats.vdevs);
 	INIT_LIST_HEAD(&stats.peers);
 
 	spin_lock_bh(&ar->data_lock);
@@ -308,6 +332,7 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
 		}
 
 		num_peers = ath10k_debug_fw_stats_num_peers(&ar->debug.fw_stats.peers);
+		num_vdevs = ath10k_debug_fw_stats_num_vdevs(&ar->debug.fw_stats.vdevs);
 		is_start = (list_empty(&ar->debug.fw_stats.pdevs) &&
 			    !list_empty(&stats.pdevs));
 		is_end = (!list_empty(&ar->debug.fw_stats.pdevs) &&
@@ -330,7 +355,13 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
 			goto free;
 		}
 
+		if (num_vdevs >= BITS_PER_LONG) {
+			ath10k_warn(ar, "dropping fw vdev stats\n");
+			goto free;
+		}
+
 		list_splice_tail_init(&stats.peers, &ar->debug.fw_stats.peers);
+		list_splice_tail_init(&stats.vdevs, &ar->debug.fw_stats.vdevs);
 	}
 
 	complete(&ar->debug.fw_stats_complete);
@@ -340,6 +371,7 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
 	 * resources if that is not the case.
 	 */
 	ath10k_debug_fw_stats_pdevs_free(&stats.pdevs);
+	ath10k_debug_fw_stats_vdevs_free(&stats.vdevs);
 	ath10k_debug_fw_stats_peers_free(&stats.peers);
 
 unlock:
@@ -363,7 +395,10 @@ static int ath10k_debug_fw_stats_request(struct ath10k *ar)
 
 		reinit_completion(&ar->debug.fw_stats_complete);
 
-		ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT);
+		ret = ath10k_wmi_request_stats(ar,
+					       WMI_STAT_PDEV |
+					       WMI_STAT_VDEV |
+					       WMI_STAT_PEER);
 		if (ret) {
 			ath10k_warn(ar, "could not request stats (%d)\n", ret);
 			return ret;
@@ -395,8 +430,11 @@ static void ath10k_fw_stats_fill(struct ath10k *ar,
 	unsigned int len = 0;
 	unsigned int buf_len = ATH10K_FW_STATS_BUF_SIZE;
 	const struct ath10k_fw_stats_pdev *pdev;
+	const struct ath10k_fw_stats_vdev *vdev;
 	const struct ath10k_fw_stats_peer *peer;
 	size_t num_peers;
+	size_t num_vdevs;
+	int i;
 
 	spin_lock_bh(&ar->data_lock);
@@ -408,6 +446,7 @@ static void ath10k_fw_stats_fill(struct ath10k *ar,
 	}
 
 	num_peers = ath10k_debug_fw_stats_num_peers(&fw_stats->peers);
+	num_vdevs = ath10k_debug_fw_stats_num_vdevs(&fw_stats->vdevs);
 
 	len += scnprintf(buf + len, buf_len - len, "\n");
 	len += scnprintf(buf + len, buf_len - len, "%30s\n",
@@ -529,6 +568,65 @@ static void ath10k_fw_stats_fill(struct ath10k *ar,
 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
 			 "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
 
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+			 "ath10k VDEV stats", num_vdevs);
+	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+			 "=================");
+
+	list_for_each_entry(vdev, &fw_stats->vdevs, list) {
+		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+				 "vdev id", vdev->vdev_id);
+		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+				 "beacon snr", vdev->beacon_snr);
+		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+				 "data snr", vdev->data_snr);
+		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+				 "num rx frames", vdev->num_rx_frames);
+		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+				 "num rts fail", vdev->num_rts_fail);
+		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+				 "num rts success", vdev->num_rts_success);
+		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+				 "num rx err", vdev->num_rx_err);
+		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+				 "num rx discard", vdev->num_rx_discard);
+		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+				 "num tx not acked", vdev->num_tx_not_acked);
+
+		for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
+			len += scnprintf(buf + len, buf_len - len,
+					 "%25s [%02d] %u\n",
+					 "num tx frames", i,
+					 vdev->num_tx_frames[i]);
+
+		for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
+			len += scnprintf(buf + len, buf_len - len,
+					 "%25s [%02d] %u\n",
+					 "num tx frames retries", i,
+					 vdev->num_tx_frames_retries[i]);
+
+		for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
+			len += scnprintf(buf + len, buf_len - len,
+					 "%25s [%02d] %u\n",
+					 "num tx frames failures", i,
+					 vdev->num_tx_frames_failures[i]);
+
+		for (i = 0 ; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
+			len += scnprintf(buf + len, buf_len - len,
+					 "%25s [%02d] 0x%08x\n",
+					 "tx rate history", i,
+					 vdev->tx_rate_history[i]);
+
+		for (i = 0 ; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
+			len += scnprintf(buf + len, buf_len - len,
+					 "%25s [%02d] %u\n",
+					 "beacon rssi history", i,
+					 vdev->beacon_rssi_history[i]);
+
+		len += scnprintf(buf + len, buf_len - len, "\n");
+	}
+
 	len += scnprintf(buf + len, buf_len - len, "\n");
 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
 			 "ath10k PEER stats", num_peers);
@@ -1900,6 +1998,7 @@ int ath10k_debug_create(struct ath10k *ar)
 		return -ENOMEM;
 
 	INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
+	INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
 	INIT_LIST_HEAD(&ar->debug.fw_stats.peers);
 
 	return 0;

@@ -176,7 +176,7 @@ static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
 	 * automatically balances load wrt to CPU power.
 	 *
 	 * This probably comes at a cost of lower maximum throughput but
-	 * improves the avarage and stability. */
+	 * improves the average and stability. */
 	spin_lock_bh(&htt->rx_ring.lock);
 	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
 	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);

@@ -611,7 +611,7 @@ static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
 	ret = ath10k_vdev_setup_sync(ar);
 	if (ret) {
-		ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i: %d\n",
+		ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n",
 			    vdev_id, ret);
 		return ret;
 	}
@@ -658,7 +658,7 @@ static int ath10k_monitor_vdev_stop(struct ath10k *ar)
 	ret = ath10k_vdev_setup_sync(ar);
 	if (ret)
-		ath10k_warn(ar, "failed to synchronise monitor vdev %i: %d\n",
+		ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n",
 			    ar->monitor_vdev_id, ret);
 
 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
@@ -927,8 +927,9 @@ static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, bool restart)
 	ret = ath10k_vdev_setup_sync(ar);
 	if (ret) {
-		ath10k_warn(ar, "failed to synchronise setup for vdev %i: %d\n",
-			    arg.vdev_id, ret);
+		ath10k_warn(ar,
+			    "failed to synchronize setup for vdev %i restart %d: %d\n",
+			    arg.vdev_id, restart, ret);
 		return ret;
 	}
@@ -966,7 +967,7 @@ static int ath10k_vdev_stop(struct ath10k_vif *arvif)
 	ret = ath10k_vdev_setup_sync(ar);
 	if (ret) {
-		ath10k_warn(ar, "failed to syncronise setup for vdev %i: %d\n",
+		ath10k_warn(ar, "failed to synchronize setup for vdev %i stop: %d\n",
 			    arvif->vdev_id, ret);
 		return ret;
 	}
@@ -1253,6 +1254,20 @@ static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
 	return 0;
 }
 
+static int ath10k_mac_ps_vif_count(struct ath10k *ar)
+{
+	struct ath10k_vif *arvif;
+	int num = 0;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	list_for_each_entry(arvif, &ar->arvifs, list)
+		if (arvif->ps)
+			num++;
+
+	return num;
+}
+
 static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
 {
 	struct ath10k *ar = arvif->ar;
@@ -1262,13 +1277,24 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
 	enum wmi_sta_ps_mode psmode;
 	int ret;
 	int ps_timeout;
+	bool enable_ps;
 
 	lockdep_assert_held(&arvif->ar->conf_mutex);
 
 	if (arvif->vif->type != NL80211_IFTYPE_STATION)
 		return 0;
 
-	if (vif->bss_conf.ps) {
+	enable_ps = arvif->ps;
+
+	if (enable_ps && ath10k_mac_ps_vif_count(ar) > 1 &&
+	    !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
+		      ar->fw_features)) {
+		ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
+			    arvif->vdev_id);
+		enable_ps = false;
+	}
+
+	if (enable_ps) {
 		psmode = WMI_STA_PS_MODE_ENABLED;
 		param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
 
@@ -1781,6 +1807,68 @@ static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif,
 					 ath10k_smps_map[smps]);
 }
 
+static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
+				      struct ieee80211_vif *vif,
+				      struct ieee80211_sta_vht_cap vht_cap)
+{
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	int ret;
+	u32 param;
+	u32 value;
+
+	if (!(ar->vht_cap_info &
+	      (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+	       IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
+	       IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+	       IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
+		return 0;
+
+	param = ar->wmi.vdev_param->txbf;
+	value = 0;
+
+	if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED))
+		return 0;
+
+	/* The following logic is correct. If a remote STA advertises support
+	 * for being a beamformer then we should enable us being a beamformee.
+	 */
+
+	if (ar->vht_cap_info &
+	    (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+	     IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
+		if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
+			value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+		if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
+			value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
+	}
+
+	if (ar->vht_cap_info &
+	    (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+	     IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
+		if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
+			value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+		if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
+			value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
+	}
+
+	if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE)
+		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+	if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER)
+		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value);
+	if (ret) {
+		ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n",
+			    value, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
 /* can be called only in mac80211 callbacks due to `key_count` usage */
 static void ath10k_bss_assoc(struct ieee80211_hw *hw,
 			     struct ieee80211_vif *vif,
@@ -1789,6 +1877,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
 	struct ath10k *ar = hw->priv;
 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
 	struct ieee80211_sta_ht_cap ht_cap;
+	struct ieee80211_sta_vht_cap vht_cap;
 	struct wmi_peer_assoc_complete_arg peer_arg;
 	struct ieee80211_sta *ap_sta;
 	int ret;
@@ -1811,6 +1900,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
 	/* ap_sta must be accessed only within rcu section which must be left
 	 * before calling ath10k_setup_peer_smps() which might sleep. */
 	ht_cap = ap_sta->ht_cap;
+	vht_cap = ap_sta->vht_cap;
 
 	ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg);
 	if (ret) {
@@ -1836,6 +1926,13 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
 		return;
 	}
 
+	ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
+	if (ret) {
+		ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n",
+			    arvif->vdev_id, bss_conf->bssid, ret);
+		return;
+	}
+
 	ath10k_dbg(ar, ATH10K_DBG_MAC,
 		   "mac vdev %d up (associated) bssid %pM aid %d\n",
 		   arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
@@ -1853,6 +1950,18 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
 	}
 
 	arvif->is_up = true;
+
+	/* Workaround: Some firmware revisions (tested with qca6174
+	 * WLAN.RM.2.0-00073) have buggy powersave state machine and must be
+	 * poked with peer param command.
+	 */
+	ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid,
+					WMI_PEER_DUMMY_VAR, 1);
+	if (ret) {
+		ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n",
+			    arvif->bssid, arvif->vdev_id, ret);
+		return;
+	}
 }
 
 static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
@@ -1860,6 +1969,7 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
 {
 	struct ath10k *ar = hw->priv;
 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct ieee80211_sta_vht_cap vht_cap = {};
 	int ret;
 
 	lockdep_assert_held(&ar->conf_mutex);
@@ -1874,6 +1984,13 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
 
 	arvif->def_wep_key_idx = -1;
 
+	ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
+	if (ret) {
+		ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+		return;
+	}
+
 	arvif->is_up = false;
 }
 
@@ -2554,6 +2671,17 @@ static int ath10k_start_scan(struct ath10k *ar,
 		return -ETIMEDOUT;
 	}
 
+	/* If we failed to start the scan, return error code at
+	 * this point.  This is probably due to some issue in the
+	 * firmware, but no need to wedge the driver due to that...
+	 */
+	spin_lock_bh(&ar->data_lock);
+	if (ar->scan.state == ATH10K_SCAN_IDLE) {
+		spin_unlock_bh(&ar->data_lock);
+		return -EINVAL;
+	}
+	spin_unlock_bh(&ar->data_lock);
+
 	/* Add a 200ms margin to account for event/command processing */
 	ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
 				     msecs_to_jiffies(arg->max_scan_time+200));
@@ -3323,9 +3451,10 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
 	list_del(&arvif->list);
 
 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
-		ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
+		ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id,
+					     vif->addr);
 		if (ret)
-			ath10k_warn(ar, "failed to remove peer for AP vdev %i: %d\n",
+			ath10k_warn(ar, "failed to submit AP self-peer removal on vdev %i: %d\n",
 				    arvif->vdev_id, ret);
 
 		kfree(arvif->u.ap.noa_data);
@@ -3339,6 +3468,21 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
 		ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n",
 			    arvif->vdev_id, ret);
 
+	/* Some firmware revisions don't notify host about self-peer removal
+	 * until after associated vdev is deleted.
+	 */
+	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+		ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id,
+						   vif->addr);
+		if (ret)
+			ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n",
+				    arvif->vdev_id, ret);
+
+		spin_lock_bh(&ar->data_lock);
+		ar->num_peers--;
+		spin_unlock_bh(&ar->data_lock);
+	}
+
 	ath10k_peer_cleanup(ar, arvif->vdev_id);
 
 	mutex_unlock(&ar->conf_mutex);
@@ -3534,7 +3678,9 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
 	}
 
 	if (changed & BSS_CHANGED_PS) {
-		ret = ath10k_mac_vif_setup_ps(arvif);
+		arvif->ps = vif->bss_conf.ps;
+
+		ret = ath10k_config_ps(ar);
 		if (ret)
 			ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n",
 				    arvif->vdev_id, ret);

@@ -104,7 +104,7 @@ static const struct ce_attr host_ce_config_wlan[] = {
 	{
 		.flags = CE_ATTR_FLAGS,
 		.src_nentries = 0,
-		.src_sz_max = 512,
+		.src_sz_max = 2048,
 		.dest_nentries = 512,
 	},
 
@@ -174,7 +174,7 @@ static const struct ce_pipe_config target_ce_config_wlan[] = {
 		.pipenum = __cpu_to_le32(1),
 		.pipedir = __cpu_to_le32(PIPEDIR_IN),
 		.nentries = __cpu_to_le32(32),
-		.nbytes_max = __cpu_to_le32(512),
+		.nbytes_max = __cpu_to_le32(2048),
 		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
 		.reserved = __cpu_to_le32(0),
 	},

@@ -110,8 +110,7 @@ struct wmi_ops {
 					      bool deliver_cab);
 	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
 					    const struct wmi_wmm_params_all_arg *arg);
-	struct sk_buff *(*gen_request_stats)(struct ath10k *ar,
-					     enum wmi_stats_id stats_id);
+	struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
 	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
 					     enum wmi_force_fw_hang_type type,
 					     u32 delay_ms);
@@ -816,14 +815,14 @@ ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
 }
 
 static inline int
-ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
+ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
 {
 	struct sk_buff *skb;
 
 	if (!ar->wmi.ops->gen_request_stats)
 		return -EOPNOTSUPP;
 
-	skb = ar->wmi.ops->gen_request_stats(ar, stats_id);
+	skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
 	if (IS_ERR(skb))
 		return PTR_ERR(skb);

@@ -869,16 +869,57 @@ static int ath10k_wmi_tlv_op_pull_rdy_ev(struct ath10k *ar,
 	return 0;
 }
 
+static void ath10k_wmi_tlv_pull_vdev_stats(const struct wmi_tlv_vdev_stats *src,
+					   struct ath10k_fw_stats_vdev *dst)
+{
+	int i;
+
+	dst->vdev_id = __le32_to_cpu(src->vdev_id);
+	dst->beacon_snr = __le32_to_cpu(src->beacon_snr);
+	dst->data_snr = __le32_to_cpu(src->data_snr);
+	dst->num_rx_frames = __le32_to_cpu(src->num_rx_frames);
+	dst->num_rts_fail = __le32_to_cpu(src->num_rts_fail);
+	dst->num_rts_success = __le32_to_cpu(src->num_rts_success);
+	dst->num_rx_err = __le32_to_cpu(src->num_rx_err);
+	dst->num_rx_discard = __le32_to_cpu(src->num_rx_discard);
+	dst->num_tx_not_acked = __le32_to_cpu(src->num_tx_not_acked);
+
+	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++)
+		dst->num_tx_frames[i] =
+			__le32_to_cpu(src->num_tx_frames[i]);
+
+	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++)
+		dst->num_tx_frames_retries[i] =
+			__le32_to_cpu(src->num_tx_frames_retries[i]);
+
+	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++)
+		dst->num_tx_frames_failures[i] =
+			__le32_to_cpu(src->num_tx_frames_failures[i]);
+
+	for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++)
+		dst->tx_rate_history[i] =
+			__le32_to_cpu(src->tx_rate_history[i]);
+
+	for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++)
+		dst->beacon_rssi_history[i] =
+			__le32_to_cpu(src->beacon_rssi_history[i]);
+}
+
 static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
 					   struct sk_buff *skb,
 					   struct ath10k_fw_stats *stats)
 {
 	const void **tb;
-	const struct wmi_stats_event *ev;
+	const struct wmi_tlv_stats_ev *ev;
 	const void *data;
-	u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
+	u32 num_pdev_stats;
+	u32 num_vdev_stats;
+	u32 num_peer_stats;
+	u32 num_bcnflt_stats;
+	u32 num_chan_stats;
 	size_t data_len;
 	int ret;
+	int i;
 
 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 	if (IS_ERR(tb)) {
@@ -899,8 +940,73 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
 	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
 	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
 	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+	num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
+	num_chan_stats = __le32_to_cpu(ev->num_chan_stats);
 
-	WARN_ON(1); /* FIXME: not implemented yet */
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i\n",
+		   num_pdev_stats, num_vdev_stats, num_peer_stats,
+		   num_bcnflt_stats, num_chan_stats);
+
+	for (i = 0; i < num_pdev_stats; i++) {
+		const struct wmi_pdev_stats *src;
+		struct ath10k_fw_stats_pdev *dst;
+
+		src = data;
+		if (data_len < sizeof(*src))
+			return -EPROTO;
+
+		data += sizeof(*src);
+		data_len -= sizeof(*src);
+
+		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+		if (!dst)
+			continue;
+
+		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+		list_add_tail(&dst->list, &stats->pdevs);
+	}
+
+	for (i = 0; i < num_vdev_stats; i++) {
+		const struct wmi_tlv_vdev_stats *src;
+		struct ath10k_fw_stats_vdev *dst;
+
+		src = data;
+		if (data_len < sizeof(*src))
+			return -EPROTO;
+
+		data += sizeof(*src);
+		data_len -= sizeof(*src);
+
+		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+		if (!dst)
+			continue;
+
+		ath10k_wmi_tlv_pull_vdev_stats(src, dst);
+		list_add_tail(&dst->list, &stats->vdevs);
+	}
+
+	for (i = 0; i < num_peer_stats; i++) {
+		const struct wmi_10x_peer_stats *src;
+		struct ath10k_fw_stats_peer *dst;
+
+		src = data;
+		if (data_len < sizeof(*src))
+			return -EPROTO;
+
+		data += sizeof(*src);
+		data_len -= sizeof(*src);
+
+		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+		if (!dst)
+			continue;
+
+		ath10k_wmi_pull_peer_stats(&src->old, dst);
+		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+		list_add_tail(&dst->list, &stats->peers);
+	}
 
 	kfree(tb);
 	return 0;
@@ -1604,14 +1710,12 @@ ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
 				    const struct wmi_wmm_params_all_arg *arg)
 {
 	struct wmi_tlv_vdev_set_wmm_cmd *cmd;
-	struct wmi_wmm_params *wmm;
 	struct wmi_tlv *tlv;
 	struct sk_buff *skb;
 	size_t len;
 	void *ptr;
 
-	len = (sizeof(*tlv) + sizeof(*cmd)) +
-	      (4 * (sizeof(*tlv) + sizeof(*wmm)));
+	len = sizeof(*tlv) + sizeof(*cmd);
 	skb = ath10k_wmi_alloc_skb(ar, len);
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
@@ -1623,13 +1727,10 @@ ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
 	cmd = (void *)tlv->value;
 	cmd->vdev_id = __cpu_to_le32(vdev_id);
 
-	ptr += sizeof(*tlv);
-	ptr += sizeof(*cmd);
-
-	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_be);
-	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_bk);
-	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vi);
-	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vo);
+	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[0].params, &arg->ac_be);
+	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[1].params, &arg->ac_bk);
+	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[2].params, &arg->ac_vi);
+	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[3].params, &arg->ac_vo);
 
 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev wmm conf\n");
 	return skb;
@@ -2080,8 +2181,7 @@ ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar,
 }
 
 static struct sk_buff *
-ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar,
-				    enum wmi_stats_id stats_id)
+ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
 {
 	struct wmi_request_stats_cmd *cmd;
 	struct wmi_tlv *tlv;
@@ -2095,7 +2195,7 @@ ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar,
 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD);
 	tlv->len = __cpu_to_le16(sizeof(*cmd));
 	cmd = (void *)tlv->value;
-	cmd->stats_id = __cpu_to_le32(stats_id);
+	cmd->stats_id = __cpu_to_le32(stats_mask);
 
 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request stats\n");
 	return skb;

@@ -1302,8 +1302,14 @@ struct wmi_tlv_pdev_set_wmm_cmd {
 	__le32 dg_type; /* no idea.. */
 } __packed;
 
+struct wmi_tlv_vdev_wmm_params {
+	__le32 dummy;
+	struct wmi_wmm_params params;
+} __packed;
+
 struct wmi_tlv_vdev_set_wmm_cmd {
 	__le32 vdev_id;
+	struct wmi_tlv_vdev_wmm_params vdev_wmm_params[4];
 } __packed;
 
 struct wmi_tlv_phyerr_ev {
@@ -1439,6 +1445,15 @@ struct wmi_tlv_sta_keepalive_cmd {
 	__le32 interval; /* in seconds */
 } __packed;
 
+struct wmi_tlv_stats_ev {
+	__le32 stats_id; /* WMI_STAT_ */
+	__le32 num_pdev_stats;
+	__le32 num_vdev_stats;
+	__le32 num_peer_stats;
+	__le32 num_bcnflt_stats;
+	__le32 num_chan_stats;
+} __packed;
+
 void ath10k_wmi_tlv_attach(struct ath10k *ar);
 
 #endif

@@ -1125,6 +1125,25 @@ static void ath10k_wmi_event_scan_started(struct ath10k *ar)
 	}
 }
 
+static void ath10k_wmi_event_scan_start_failed(struct ath10k *ar)
+{
+	lockdep_assert_held(&ar->data_lock);
+
+	switch (ar->scan.state) {
+	case ATH10K_SCAN_IDLE:
+	case ATH10K_SCAN_RUNNING:
+	case ATH10K_SCAN_ABORTING:
+		ath10k_warn(ar, "received scan start failed event in an invalid scan state: %s (%d)\n",
+			    ath10k_scan_state_str(ar->scan.state),
+			    ar->scan.state);
+		break;
+	case ATH10K_SCAN_STARTING:
+		complete(&ar->scan.started);
+		__ath10k_scan_finish(ar);
+		break;
+	}
+}
+
 static void ath10k_wmi_event_scan_completed(struct ath10k *ar)
 {
 	lockdep_assert_held(&ar->data_lock);
@@ -1292,6 +1311,7 @@ int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
 		break;
 	case WMI_SCAN_EVENT_START_FAILED:
 		ath10k_warn(ar, "received scan start failure event\n");
+		ath10k_wmi_event_scan_start_failed(ar);
 		break;
 	case WMI_SCAN_EVENT_DEQUEUED:
 	case WMI_SCAN_EVENT_PREEMPTED:
@@ -4954,7 +4974,7 @@ ath10k_wmi_op_gen_pdev_set_wmm(struct ath10k *ar,
 }
 
 static struct sk_buff *
-ath10k_wmi_op_gen_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
+ath10k_wmi_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
 {
 	struct wmi_request_stats_cmd *cmd;
 	struct sk_buff *skb;
@@ -4964,9 +4984,10 @@ ath10k_wmi_op_gen_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
 		return ERR_PTR(-ENOMEM);
 
 	cmd = (struct wmi_request_stats_cmd *)skb->data;
-	cmd->stats_id = __cpu_to_le32(stats_id);
+	cmd->stats_id = __cpu_to_le32(stats_mask);
 
-	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats 0x%08x\n",
+		   stats_mask);
 	return skb;
 }

@@ -3057,8 +3057,12 @@ struct wmi_pdev_stats_peer {
 } __packed;
 
 enum wmi_stats_id {
-	WMI_REQUEST_PEER_STAT = 0x01,
-	WMI_REQUEST_AP_STAT = 0x02
+	WMI_STAT_PEER = BIT(0),
+	WMI_STAT_AP = BIT(1),
+	WMI_STAT_PDEV = BIT(2),
+	WMI_STAT_VDEV = BIT(3),
+	WMI_STAT_BCNFLT = BIT(4),
+	WMI_STAT_VDEV_RATE = BIT(5),
 };
 
 struct wlan_inst_rssi_args {
@@ -3093,7 +3097,7 @@ struct wmi_pdev_suspend_cmd {
 } __packed;
 
 struct wmi_stats_event {
-	__le32 stats_id; /* %WMI_REQUEST_ */
+	__le32 stats_id; /* WMI_STAT_ */
 	/*
 	 * number of pdev stats event structures
 	 * (wmi_pdev_stats) 0 or 1
@@ -3745,6 +3749,11 @@ enum wmi_10x_vdev_param {
 	WMI_10X_VDEV_PARAM_VHT80_RATEMASK,
 };
 
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1)
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
+
 /* slot time long */
 #define WMI_VDEV_SLOT_TIME_LONG 0x1
 /* slot time short */
@@ -4436,7 +4445,8 @@ enum wmi_peer_param {
 	WMI_PEER_AUTHORIZE = 0x3,
 	WMI_PEER_CHAN_WIDTH = 0x4,
 	WMI_PEER_NSS = 0x5,
-	WMI_PEER_USE_4ADDR = 0x6
+	WMI_PEER_USE_4ADDR = 0x6,
+	WMI_PEER_DUMMY_VAR = 0xff, /* dummy parameter for STA PS workaround */
 };
 
 struct wmi_peer_set_param_cmd {

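Since the wmi_stats_id values above are now bit flags rather than mutually exclusive request codes, several stat types can be ORed into a single request. A short illustrative helper (the wrapper function is hypothetical; it simply mirrors the debug.c hunk earlier in this diff):

	static int example_request_fw_stats(struct ath10k *ar)
	{
		/* Request pdev, vdev and peer stats in one WMI command. */
		return ath10k_wmi_request_stats(ar,
						WMI_STAT_PDEV |
						WMI_STAT_VDEV |
						WMI_STAT_PEER);
	}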
@@ -284,12 +284,12 @@ static void ar9003_mci_prep_interface(struct ath_hw *ah)
 		  AR_MCI_INTERRUPT_RX_MSG_CONT_RST);
 	REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, AR_MCI_INTERRUPT_BT_PRI);
 
-	if (mci->is_2g) {
+	if (mci->is_2g && MCI_ANT_ARCH_PA_LNA_SHARED(mci)) {
 		ar9003_mci_send_lna_transfer(ah, true);
 		udelay(5);
 	}
 
-	if ((mci->is_2g && !mci->update_2g5g)) {
+	if (mci->is_2g && !mci->update_2g5g && MCI_ANT_ARCH_PA_LNA_SHARED(mci)) {
 		if (ar9003_mci_wait_for_interrupt(ah,
 				AR_MCI_INTERRUPT_RX_MSG_RAW,
 				AR_MCI_INTERRUPT_RX_MSG_LNA_INFO,
@@ -593,7 +593,7 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
 		if (!time_out)
 			break;
 
-		offset = ar9003_mci_get_next_gpm_offset(ah, false, &more_data);
+		offset = ar9003_mci_get_next_gpm_offset(ah, &more_data);
 
 		if (offset == MCI_GPM_INVALID)
 			continue;
@@ -657,7 +657,7 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
 			time_out = 0;
 
 	while (more_data == MCI_GPM_MORE) {
-		offset = ar9003_mci_get_next_gpm_offset(ah, false, &more_data);
+		offset = ar9003_mci_get_next_gpm_offset(ah, &more_data);
 		if (offset == MCI_GPM_INVALID)
 			break;
 
@@ -771,8 +771,14 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 
 static void ar9003_mci_mute_bt(struct ath_hw *ah)
 {
+	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
 	/* disable all MCI messages */
 	REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, 0xffff0000);
+	REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS0, 0xffffffff);
+	REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS1, 0xffffffff);
+	REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS2, 0xffffffff);
+	REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS3, 0xffffffff);
 	REG_SET_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
 
 	/* wait pending HW messages to flush out */
@@ -783,9 +789,10 @@ static void ar9003_mci_mute_bt(struct ath_hw *ah)
 	 * 1. reset not after resuming from full sleep
 	 * 2. before reset MCI RX, to quiet BT and avoid MCI RX misalignment
 	 */
-	ar9003_mci_send_lna_take(ah, true);
-	udelay(5);
+	if (MCI_ANT_ARCH_PA_LNA_SHARED(mci)) {
+		ar9003_mci_send_lna_take(ah, true);
+		udelay(5);
+	}
 
 	ar9003_mci_send_sys_sleeping(ah, true);
 }
@@ -821,6 +828,80 @@ static void ar9003_mci_osla_setup(struct ath_hw *ah, bool enable)
 		      AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN, 1);
 }
 
+static void ar9003_mci_stat_setup(struct ath_hw *ah)
+{
+	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+	if (!AR_SREV_9565(ah))
+		return;
+
+	if (mci->config & ATH_MCI_CONFIG_MCI_STAT_DBG) {
+		REG_RMW_FIELD(ah, AR_MCI_DBG_CNT_CTRL,
+			      AR_MCI_DBG_CNT_CTRL_ENABLE, 1);
+		REG_RMW_FIELD(ah, AR_MCI_DBG_CNT_CTRL,
+			      AR_MCI_DBG_CNT_CTRL_BT_LINKID,
+			      MCI_STAT_ALL_BT_LINKID);
+	} else {
+		REG_RMW_FIELD(ah, AR_MCI_DBG_CNT_CTRL,
+			      AR_MCI_DBG_CNT_CTRL_ENABLE, 0);
+	}
+}
+
+static void ar9003_mci_set_btcoex_ctrl_9565_1ANT(struct ath_hw *ah)
+{
+	u32 regval;
+
+	regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) |
+		 SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
+		 SM(1, AR_BTCOEX_CTRL_PA_SHARED) |
+		 SM(1, AR_BTCOEX_CTRL_LNA_SHARED) |
+		 SM(1, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
+		 SM(1, AR_BTCOEX_CTRL_RX_CHAIN_MASK) |
+		 SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
+		 SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
+		 SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+
+	REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2,
+		      AR_BTCOEX_CTRL2_TX_CHAIN_MASK, 0x1);
+	REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
+}
+
+static void ar9003_mci_set_btcoex_ctrl_9565_2ANT(struct ath_hw *ah)
+{
+	u32 regval;
+
+	regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) |
+		 SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
+		 SM(0, AR_BTCOEX_CTRL_PA_SHARED) |
+		 SM(0, AR_BTCOEX_CTRL_LNA_SHARED) |
+		 SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
+		 SM(1, AR_BTCOEX_CTRL_RX_CHAIN_MASK) |
+		 SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
+		 SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
+		 SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+
+	REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2,
+		      AR_BTCOEX_CTRL2_TX_CHAIN_MASK, 0x0);
+	REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
+}
+
+static void ar9003_mci_set_btcoex_ctrl_9462(struct ath_hw *ah)
+{
+	u32 regval;
+
+	regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) |
+		 SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
+		 SM(1, AR_BTCOEX_CTRL_PA_SHARED) |
+		 SM(1, AR_BTCOEX_CTRL_LNA_SHARED) |
+		 SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
+		 SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK) |
+		 SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
+		 SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
+		 SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+
+	REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
+}
+
 int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
 		     bool is_full_sleep)
 {
@@ -831,11 +912,6 @@ int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
 	ath_dbg(common, MCI, "MCI Reset (full_sleep = %d, is_2g = %d)\n",
 		is_full_sleep, is_2g);
 
-	if (!mci->gpm_addr && !mci->sched_addr) {
-		ath_err(common, "MCI GPM and schedule buffers are not allocated\n");
-		return -ENOMEM;
-	}
-
 	if (REG_READ(ah, AR_BTCOEX_CTRL) == 0xdeadbeef) {
 		ath_err(common, "BTCOEX control register is dead\n");
 		return -EINVAL;
@@ -850,25 +926,16 @@ int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
 	 * To avoid MCI state machine be affected by incoming remote MCI msgs,
 	 * MCI mode will be enabled later, right before reset the MCI TX and RX.
 	 */
-	regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) |
-		 SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
-		 SM(1, AR_BTCOEX_CTRL_PA_SHARED) |
-		 SM(1, AR_BTCOEX_CTRL_LNA_SHARED) |
-		 SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
-		 SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
-		 SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
-
 	if (AR_SREV_9565(ah)) {
-		regval |= SM(1, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
-			  SM(1, AR_BTCOEX_CTRL_RX_CHAIN_MASK);
-		REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2,
-			      AR_BTCOEX_CTRL2_TX_CHAIN_MASK, 0x1);
-	} else {
-		regval |= SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
-			  SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK);
-	}
-
-	REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
+		u8 ant = MS(mci->config, ATH_MCI_CONFIG_ANT_ARCH);
+
+		if (ant == ATH_MCI_ANT_ARCH_1_ANT_PA_LNA_SHARED)
+			ar9003_mci_set_btcoex_ctrl_9565_1ANT(ah);
+		else
+			ar9003_mci_set_btcoex_ctrl_9565_2ANT(ah);
+	} else {
+		ar9003_mci_set_btcoex_ctrl_9462(ah);
+	}
 
 	if (is_2g && !(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA))
 		ar9003_mci_osla_setup(ah, true);
@@ -926,23 +993,26 @@ int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
 	regval &= ~SM(1, AR_MCI_COMMAND2_RESET_RX);
 	REG_WRITE(ah, AR_MCI_COMMAND2, regval);
 
-	ar9003_mci_get_next_gpm_offset(ah, true, NULL);
+	/* Init GPM offset after MCI Reset Rx */
+	ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET);
 
 	REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE,
 		  (SM(0xe801, AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR) |
 		   SM(0x0000, AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM)));
 
-	REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
-		    AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+	if (MCI_ANT_ARCH_PA_LNA_SHARED(mci))
+		REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
+			    AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+	else
+		REG_SET_BIT(ah, AR_MCI_TX_CTRL,
+			    AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
 
 	ar9003_mci_observation_set_up(ah);
 
 	mci->ready = true;
 	ar9003_mci_prep_interface(ah);
+	ar9003_mci_stat_setup(ah);
 
-	if (AR_SREV_9565(ah))
-		REG_RMW_FIELD(ah, AR_MCI_DBG_CNT_CTRL,
-			      AR_MCI_DBG_CNT_CTRL_ENABLE, 0);
 	if (en_int)
 		ar9003_mci_enable_interrupt(ah);
 
@@ -1218,6 +1288,14 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
 		}
 		value &= AR_BTCOEX_CTRL_MCI_MODE_EN;
 		break;
+	case MCI_STATE_INIT_GPM_OFFSET:
+		value = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
+
+		if (value < mci->gpm_len)
+			mci->gpm_idx = value;
+		else
+			mci->gpm_idx = 0;
+		break;
 	case MCI_STATE_LAST_SCHD_MSG_OFFSET:
 		value = MS(REG_READ(ah, AR_MCI_RX_STATUS),
 			   AR_MCI_RX_LAST_SCHD_MSG_INDEX);
@@ -1364,21 +1442,11 @@ void ar9003_mci_check_gpm_offset(struct ath_hw *ah)
 	mci->gpm_idx = 0;
 }
 
-u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more)
+u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, u32 *more)
{ {
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u32 offset, more_gpm = 0, gpm_ptr; u32 offset, more_gpm = 0, gpm_ptr;
if (first) {
gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
if (gpm_ptr >= mci->gpm_len)
gpm_ptr = 0;
mci->gpm_idx = gpm_ptr;
return gpm_ptr;
}
/* /*
* This could be useful to avoid new GPM message interrupt which * This could be useful to avoid new GPM message interrupt which
* may lead to spurious interrupt after power sleep, or multiple * may lead to spurious interrupt after power sleep, or multiple

View file

@ -92,14 +92,36 @@ enum mci_gpm_coex_bt_update_flags_op {
#define ATH_MCI_CONFIG_CLK_DIV 0x00003000 #define ATH_MCI_CONFIG_CLK_DIV 0x00003000
#define ATH_MCI_CONFIG_CLK_DIV_S 12 #define ATH_MCI_CONFIG_CLK_DIV_S 12
#define ATH_MCI_CONFIG_DISABLE_TUNING 0x00004000 #define ATH_MCI_CONFIG_DISABLE_TUNING 0x00004000
#define ATH_MCI_CONFIG_DISABLE_AIC 0x00008000
#define ATH_MCI_CONFIG_AIC_CAL_NUM_CHAN 0x007f0000
#define ATH_MCI_CONFIG_AIC_CAL_NUM_CHAN_S 16
#define ATH_MCI_CONFIG_NO_QUIET_ACK 0x00800000
#define ATH_MCI_CONFIG_NO_QUIET_ACK_S 23
#define ATH_MCI_CONFIG_ANT_ARCH 0x07000000
#define ATH_MCI_CONFIG_ANT_ARCH_S 24
#define ATH_MCI_CONFIG_FORCE_QUIET_ACK 0x08000000
#define ATH_MCI_CONFIG_FORCE_QUIET_ACK_S 27
#define ATH_MCI_CONFIG_FORCE_2CHAIN_ACK 0x10000000
#define ATH_MCI_CONFIG_MCI_STAT_DBG 0x20000000
#define ATH_MCI_CONFIG_MCI_WEIGHT_DBG 0x40000000 #define ATH_MCI_CONFIG_MCI_WEIGHT_DBG 0x40000000
#define ATH_MCI_CONFIG_DISABLE_MCI 0x80000000 #define ATH_MCI_CONFIG_DISABLE_MCI 0x80000000
#define ATH_MCI_CONFIG_MCI_OBS_MASK (ATH_MCI_CONFIG_MCI_OBS_MCI | \ #define ATH_MCI_CONFIG_MCI_OBS_MASK (ATH_MCI_CONFIG_MCI_OBS_MCI | \
ATH_MCI_CONFIG_MCI_OBS_TXRX | \ ATH_MCI_CONFIG_MCI_OBS_TXRX | \
ATH_MCI_CONFIG_MCI_OBS_BT) ATH_MCI_CONFIG_MCI_OBS_BT)
#define ATH_MCI_CONFIG_MCI_OBS_GPIO 0x0000002F #define ATH_MCI_CONFIG_MCI_OBS_GPIO 0x0000002F
#define ATH_MCI_ANT_ARCH_1_ANT_PA_LNA_NON_SHARED 0x00
#define ATH_MCI_ANT_ARCH_1_ANT_PA_LNA_SHARED 0x01
#define ATH_MCI_ANT_ARCH_2_ANT_PA_LNA_NON_SHARED 0x02
#define ATH_MCI_ANT_ARCH_2_ANT_PA_LNA_SHARED 0x03
#define ATH_MCI_ANT_ARCH_3_ANT 0x04
#define MCI_ANT_ARCH_PA_LNA_SHARED(mci) \
((MS(mci->config, ATH_MCI_CONFIG_ANT_ARCH) == ATH_MCI_ANT_ARCH_1_ANT_PA_LNA_SHARED) || \
(MS(mci->config, ATH_MCI_CONFIG_ANT_ARCH) == ATH_MCI_ANT_ARCH_2_ANT_PA_LNA_SHARED))
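The ANT_ARCH values and the MCI_ANT_ARCH_PA_LNA_SHARED() helper are what the reset and mute paths in ar9003_mci.c now key off. A minimal sketch of the intended decode, assuming it sits next to the static helpers added above (the wrapper name is illustrative):

/* Illustrative only: choose the BTCOEX_CTRL programming from the
 * antenna architecture encoded in mci->config, mirroring what
 * ar9003_mci_reset() does in this series.
 */
static void ar9003_mci_apply_ant_arch(struct ath_hw *ah)
{
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
	u8 ant = MS(mci->config, ATH_MCI_CONFIG_ANT_ARCH);

	if (AR_SREV_9565(ah)) {
		if (ant == ATH_MCI_ANT_ARCH_1_ANT_PA_LNA_SHARED)
			ar9003_mci_set_btcoex_ctrl_9565_1ANT(ah);
		else
			ar9003_mci_set_btcoex_ctrl_9565_2ANT(ah);
	} else {
		ar9003_mci_set_btcoex_ctrl_9462(ah);
	}
}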
enum mci_message_header { /* length of payload */ enum mci_message_header { /* length of payload */
MCI_LNA_CTRL = 0x10, /* len = 0 */ MCI_LNA_CTRL = 0x10, /* len = 0 */
MCI_CONT_NACK = 0x20, /* len = 0 */ MCI_CONT_NACK = 0x20, /* len = 0 */
@ -188,20 +210,55 @@ enum mci_bt_state {
MCI_BT_CAL MCI_BT_CAL
}; };
enum mci_ps_state {
MCI_PS_DISABLE,
MCI_PS_ENABLE,
MCI_PS_ENABLE_OFF,
MCI_PS_ENABLE_ON
};
/* Type of state query */ /* Type of state query */
enum mci_state_type { enum mci_state_type {
MCI_STATE_ENABLE, MCI_STATE_ENABLE,
MCI_STATE_INIT_GPM_OFFSET,
MCI_STATE_CHECK_GPM_OFFSET,
MCI_STATE_NEXT_GPM_OFFSET,
MCI_STATE_LAST_GPM_OFFSET,
MCI_STATE_BT,
MCI_STATE_SET_BT_SLEEP,
MCI_STATE_SET_BT_AWAKE, MCI_STATE_SET_BT_AWAKE,
MCI_STATE_SET_BT_CAL_START,
MCI_STATE_SET_BT_CAL,
MCI_STATE_LAST_SCHD_MSG_OFFSET, MCI_STATE_LAST_SCHD_MSG_OFFSET,
MCI_STATE_REMOTE_SLEEP, MCI_STATE_REMOTE_SLEEP,
MCI_STATE_CONT_STATUS,
MCI_STATE_RESET_REQ_WAKE, MCI_STATE_RESET_REQ_WAKE,
MCI_STATE_SEND_WLAN_COEX_VERSION, MCI_STATE_SEND_WLAN_COEX_VERSION,
MCI_STATE_SET_BT_COEX_VERSION,
MCI_STATE_SEND_WLAN_CHANNELS,
MCI_STATE_SEND_VERSION_QUERY, MCI_STATE_SEND_VERSION_QUERY,
MCI_STATE_SEND_STATUS_QUERY, MCI_STATE_SEND_STATUS_QUERY,
MCI_STATE_NEED_FLUSH_BT_INFO,
MCI_STATE_SET_CONCUR_TX_PRI,
MCI_STATE_RECOVER_RX, MCI_STATE_RECOVER_RX,
MCI_STATE_NEED_FTP_STOMP, MCI_STATE_NEED_FTP_STOMP,
MCI_STATE_NEED_TUNING,
MCI_STATE_NEED_STAT_DEBUG,
MCI_STATE_SHARED_CHAIN_CONCUR_TX,
MCI_STATE_AIC_CAL,
MCI_STATE_AIC_START,
MCI_STATE_AIC_CAL_RESET,
MCI_STATE_AIC_CAL_SINGLE,
MCI_STATE_IS_AR9462,
MCI_STATE_IS_AR9565_1ANT,
MCI_STATE_IS_AR9565_2ANT,
MCI_STATE_WLAN_WEAK_SIGNAL,
MCI_STATE_SET_WLAN_PS_STATE,
MCI_STATE_GET_WLAN_PS_STATE,
MCI_STATE_DEBUG, MCI_STATE_DEBUG,
MCI_STATE_NEED_FLUSH_BT_INFO, MCI_STATE_STAT_DEBUG,
MCI_STATE_ALLOW_FCS,
MCI_STATE_SET_2G_CONTENTION,
MCI_STATE_MAX MCI_STATE_MAX
}; };
@ -255,7 +312,7 @@ int ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
void ar9003_mci_cleanup(struct ath_hw *ah); void ar9003_mci_cleanup(struct ath_hw *ah);
void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr, void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
u32 *rx_msg_intr); u32 *rx_msg_intr);
u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more); u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, u32 *more);
void ar9003_mci_set_bt_version(struct ath_hw *ah, u8 major, u8 minor); void ar9003_mci_set_bt_version(struct ath_hw *ah, u8 major, u8 minor);
void ar9003_mci_send_wlan_channels(struct ath_hw *ah); void ar9003_mci_send_wlan_channels(struct ath_hw *ah);
/* /*

View file

@ -20,11 +20,25 @@
#include "reg_wow.h" #include "reg_wow.h"
#include "hw-ops.h" #include "hw-ops.h"
static void ath9k_hw_set_sta_powersave(struct ath_hw *ah)
{
if (!ath9k_hw_mci_is_enabled(ah))
goto set;
/*
* If MCI is being used, set PWR_SAV only when MCI's
* PS state is disabled.
*/
if (ar9003_mci_state(ah, MCI_STATE_GET_WLAN_PS_STATE) != MCI_PS_DISABLE)
return;
set:
REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
}
static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah) static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
{ {
struct ath_common *common = ath9k_hw_common(ah); struct ath_common *common = ath9k_hw_common(ah);
REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); ath9k_hw_set_sta_powersave(ah);
/* set rx disable bit */ /* set rx disable bit */
REG_WRITE(ah, AR_CR, AR_CR_RXD); REG_WRITE(ah, AR_CR, AR_CR_RXD);
@ -44,6 +58,9 @@ static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
REG_CLR_BIT(ah, AR_DIRECT_CONNECT, AR_DC_TSF2_ENABLE); REG_CLR_BIT(ah, AR_DIRECT_CONNECT, AR_DC_TSF2_ENABLE);
} }
if (ath9k_hw_mci_is_enabled(ah))
REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT); REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT);
} }
@ -74,8 +91,6 @@ static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
for (i = 0; i < KAL_NUM_DESC_WORDS; i++) for (i = 0; i < KAL_NUM_DESC_WORDS; i++)
REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]); REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
data_word[0] = (KAL_FRAME_TYPE << 2) | (KAL_FRAME_SUB_TYPE << 4) | data_word[0] = (KAL_FRAME_TYPE << 2) | (KAL_FRAME_SUB_TYPE << 4) |
(KAL_TO_DS << 8) | (KAL_DURATION_ID << 16); (KAL_TO_DS << 8) | (KAL_DURATION_ID << 16);
data_word[1] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) | data_word[1] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
@ -88,9 +103,11 @@ static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
(ap_mac_addr[1] << 8) | (ap_mac_addr[0]); (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
data_word[5] = (ap_mac_addr[5] << 8) | (ap_mac_addr[4]); data_word[5] = (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);
if (AR_SREV_9462_20(ah)) { if (AR_SREV_9462_20_OR_LATER(ah) || AR_SREV_9565(ah)) {
/* AR9462 2.0 has an extra descriptor word (time based /*
* discard) compared to other chips */ * AR9462 2.0 and AR9565 have an extra descriptor word
* (time based discard) compared to other chips.
*/
REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + (12 * 4)), 0); REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + (12 * 4)), 0);
wow_ka_data_word0 = AR_WOW_TXBUF(13); wow_ka_data_word0 = AR_WOW_TXBUF(13);
} else { } else {
@ -99,7 +116,6 @@ static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
for (i = 0; i < KAL_NUM_DATA_WORDS; i++) for (i = 0; i < KAL_NUM_DATA_WORDS; i++)
REG_WRITE(ah, (wow_ka_data_word0 + i*4), data_word[i]); REG_WRITE(ah, (wow_ka_data_word0 + i*4), data_word[i]);
} }
int ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern, int ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
@ -170,18 +186,17 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
u32 val = 0, rval; u32 val = 0, rval;
/* /*
* read the WoW status register to know * Read the WoW status register to know
* the wakeup reason * the wakeup reason.
*/ */
rval = REG_READ(ah, AR_WOW_PATTERN); rval = REG_READ(ah, AR_WOW_PATTERN);
val = AR_WOW_STATUS(rval); val = AR_WOW_STATUS(rval);
/* /*
* mask only the WoW events that we have enabled. Sometimes * Mask only the WoW events that we have enabled. Sometimes
* we have spurious WoW events from the AR_WOW_PATTERN * we have spurious WoW events from the AR_WOW_PATTERN
* register. This mask will clean it up. * register. This mask will clean it up.
*/ */
val &= ah->wow.wow_event_mask; val &= ah->wow.wow_event_mask;
if (val) { if (val) {
@ -195,6 +210,15 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
wow_status |= AH_WOW_BEACON_MISS; wow_status |= AH_WOW_BEACON_MISS;
} }
rval = REG_READ(ah, AR_MAC_PCU_WOW4);
val = AR_WOW_STATUS2(rval);
val &= ah->wow.wow_event_mask2;
if (val) {
if (AR_WOW2_PATTERN_FOUND(val))
wow_status |= AH_WOW_USER_PATTERN_EN;
}
/* /*
* set and clear WOW_PME_CLEAR registers for the chip to * set and clear WOW_PME_CLEAR registers for the chip to
* generate next wow signal. * generate next wow signal.
@ -206,10 +230,12 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
AR_PMCTRL_PWR_STATE_D1D3); AR_PMCTRL_PWR_STATE_D1D3);
/* /*
* clear all events * Clear all events.
*/ */
REG_WRITE(ah, AR_WOW_PATTERN, REG_WRITE(ah, AR_WOW_PATTERN,
AR_WOW_CLEAR_EVENTS(REG_READ(ah, AR_WOW_PATTERN))); AR_WOW_CLEAR_EVENTS(REG_READ(ah, AR_WOW_PATTERN)));
REG_WRITE(ah, AR_MAC_PCU_WOW4,
AR_WOW_CLEAR_EVENTS2(REG_READ(ah, AR_MAC_PCU_WOW4)));
/* /*
* restore the beacon threshold to init value * restore the beacon threshold to init value
@ -226,7 +252,15 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
if (ah->is_pciexpress) if (ah->is_pciexpress)
ath9k_hw_configpcipowersave(ah, false); ath9k_hw_configpcipowersave(ah, false);
if (AR_SREV_9462(ah) || AR_SREV_9565(ah) || AR_SREV_9485(ah)) {
u32 dc = REG_READ(ah, AR_DIRECT_CONNECT);
if (!(dc & AR_DC_TSF2_ENABLE))
ath9k_hw_gen_timer_start_tsf2(ah);
}
ah->wow.wow_event_mask = 0; ah->wow.wow_event_mask = 0;
ah->wow.wow_event_mask2 = 0;
return wow_status; return wow_status;
} }
@ -408,6 +442,9 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
ath9k_hw_wow_set_arwr_reg(ah); ath9k_hw_wow_set_arwr_reg(ah);
if (ath9k_hw_mci_is_enabled(ah))
REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
/* HW WoW */ /* HW WoW */
REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, BIT(5)); REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, BIT(5));

View file

@ -645,6 +645,7 @@ void ath9k_calculate_iter_data(struct ath_softc *sc,
struct ath9k_vif_iter_data *iter_data); struct ath9k_vif_iter_data *iter_data);
void ath9k_calculate_summary_state(struct ath_softc *sc, void ath9k_calculate_summary_state(struct ath_softc *sc,
struct ath_chanctx *ctx); struct ath_chanctx *ctx);
void ath9k_set_txpower(struct ath_softc *sc, struct ieee80211_vif *vif);
/*******************/ /*******************/
/* Beacon Handling */ /* Beacon Handling */

View file

@ -103,7 +103,9 @@ void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah)
return; return;
} }
if (AR_SREV_9300_20_OR_LATER(ah)) { if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) {
btcoex_hw->scheme = ATH_BTCOEX_CFG_MCI;
} else if (AR_SREV_9300_20_OR_LATER(ah)) {
btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE; btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300; btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300;
btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300; btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300;
@ -307,6 +309,18 @@ static void ath9k_hw_btcoex_enable_mci(struct ath_hw *ah)
btcoex->enabled = true; btcoex->enabled = true;
} }
static void ath9k_hw_btcoex_disable_mci(struct ath_hw *ah)
{
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
int i;
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
btcoex_hw->wlan_weight[i]);
}
void ath9k_hw_btcoex_enable(struct ath_hw *ah) void ath9k_hw_btcoex_enable(struct ath_hw *ah)
{ {
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
@ -318,17 +332,18 @@ void ath9k_hw_btcoex_enable(struct ath_hw *ah)
ath9k_hw_btcoex_enable_2wire(ah); ath9k_hw_btcoex_enable_2wire(ah);
break; break;
case ATH_BTCOEX_CFG_3WIRE: case ATH_BTCOEX_CFG_3WIRE:
if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
ath9k_hw_btcoex_enable_mci(ah);
return;
}
ath9k_hw_btcoex_enable_3wire(ah); ath9k_hw_btcoex_enable_3wire(ah);
break; break;
case ATH_BTCOEX_CFG_MCI:
ath9k_hw_btcoex_enable_mci(ah);
break;
} }
REG_RMW(ah, AR_GPIO_PDPU, if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI) {
(0x2 << (btcoex_hw->btactive_gpio * 2)), REG_RMW(ah, AR_GPIO_PDPU,
(0x3 << (btcoex_hw->btactive_gpio * 2))); (0x2 << (btcoex_hw->btactive_gpio * 2)),
(0x3 << (btcoex_hw->btactive_gpio * 2)));
}
ah->btcoex_hw.enabled = true; ah->btcoex_hw.enabled = true;
} }
@ -340,14 +355,14 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah)
int i; int i;
btcoex_hw->enabled = false; btcoex_hw->enabled = false;
if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE); if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_MCI) {
for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++) ath9k_hw_btcoex_disable_mci(ah);
REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
btcoex_hw->wlan_weight[i]);
return; return;
} }
ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0);
if (!AR_SREV_9300_20_OR_LATER(ah))
ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0);
ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio, ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
AR_GPIO_OUTPUT_MUX_AS_OUTPUT); AR_GPIO_OUTPUT_MUX_AS_OUTPUT);

View file

@ -58,6 +58,7 @@ enum ath_btcoex_scheme {
ATH_BTCOEX_CFG_NONE, ATH_BTCOEX_CFG_NONE,
ATH_BTCOEX_CFG_2WIRE, ATH_BTCOEX_CFG_2WIRE,
ATH_BTCOEX_CFG_3WIRE, ATH_BTCOEX_CFG_3WIRE,
ATH_BTCOEX_CFG_MCI,
}; };
struct ath9k_hw_mci { struct ath9k_hw_mci {

View file

@ -1156,7 +1156,10 @@ static ssize_t write_file_tpc(struct file *file, const char __user *user_buf,
if (tpc_enabled != ah->tpc_enabled) { if (tpc_enabled != ah->tpc_enabled) {
ah->tpc_enabled = tpc_enabled; ah->tpc_enabled = tpc_enabled;
ath9k_hw_set_txpowerlimit(ah, sc->cur_chan->txpower, false);
mutex_lock(&sc->mutex);
ath9k_set_txpower(sc, NULL);
mutex_unlock(&sc->mutex);
} }
return count; return count;

View file

@ -202,18 +202,17 @@ static void ath_btcoex_period_timer(unsigned long data)
} }
spin_unlock_irqrestore(&sc->sc_pm_lock, flags); spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
ath9k_mci_update_rssi(sc);
ath9k_ps_wakeup(sc); ath9k_ps_wakeup(sc);
spin_lock_bh(&btcoex->btcoex_lock);
if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) {
ath9k_mci_update_rssi(sc);
ath_mci_ftp_adjust(sc);
}
if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI)) if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
ath_detect_bt_priority(sc); ath_detect_bt_priority(sc);
if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
ath_mci_ftp_adjust(sc);
spin_lock_bh(&btcoex->btcoex_lock);
stomp_type = btcoex->bt_stomp_type; stomp_type = btcoex->bt_stomp_type;
timer_period = btcoex->btcoex_no_stomp; timer_period = btcoex->btcoex_no_stomp;
@ -252,9 +251,6 @@ static void ath_btcoex_no_stomp_timer(unsigned long arg)
struct ath_softc *sc = (struct ath_softc *)arg; struct ath_softc *sc = (struct ath_softc *)arg;
struct ath_hw *ah = sc->sc_ah; struct ath_hw *ah = sc->sc_ah;
struct ath_btcoex *btcoex = &sc->btcoex; struct ath_btcoex *btcoex = &sc->btcoex;
struct ath_common *common = ath9k_hw_common(ah);
ath_dbg(common, BTCOEX, "no stomp timer running\n");
ath9k_ps_wakeup(sc); ath9k_ps_wakeup(sc);
spin_lock_bh(&btcoex->btcoex_lock); spin_lock_bh(&btcoex->btcoex_lock);
@ -271,7 +267,7 @@ static void ath_btcoex_no_stomp_timer(unsigned long arg)
ath9k_ps_restore(sc); ath9k_ps_restore(sc);
} }
static int ath_init_btcoex_timer(struct ath_softc *sc) static void ath_init_btcoex_timer(struct ath_softc *sc)
{ {
struct ath_btcoex *btcoex = &sc->btcoex; struct ath_btcoex *btcoex = &sc->btcoex;
@ -280,6 +276,7 @@ static int ath_init_btcoex_timer(struct ath_softc *sc)
btcoex->btcoex_period / 100; btcoex->btcoex_period / 100;
btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) * btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) *
btcoex->btcoex_period / 100; btcoex->btcoex_period / 100;
btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
setup_timer(&btcoex->period_timer, ath_btcoex_period_timer, setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
(unsigned long) sc); (unsigned long) sc);
@ -287,8 +284,6 @@ static int ath_init_btcoex_timer(struct ath_softc *sc)
(unsigned long) sc); (unsigned long) sc);
spin_lock_init(&btcoex->btcoex_lock); spin_lock_init(&btcoex->btcoex_lock);
return 0;
} }
/* /*
@ -299,6 +294,10 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
struct ath_btcoex *btcoex = &sc->btcoex; struct ath_btcoex *btcoex = &sc->btcoex;
struct ath_hw *ah = sc->sc_ah; struct ath_hw *ah = sc->sc_ah;
if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_3WIRE &&
ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI)
return;
ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex timers\n"); ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex timers\n");
/* make sure duty cycle timer is also stopped when resuming */ /* make sure duty cycle timer is also stopped when resuming */
@ -312,13 +311,19 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
mod_timer(&btcoex->period_timer, jiffies); mod_timer(&btcoex->period_timer, jiffies);
} }
/* /*
* Pause btcoex timer and bt duty cycle timer * Pause btcoex timer and bt duty cycle timer
*/ */
void ath9k_btcoex_timer_pause(struct ath_softc *sc) void ath9k_btcoex_timer_pause(struct ath_softc *sc)
{ {
struct ath_btcoex *btcoex = &sc->btcoex; struct ath_btcoex *btcoex = &sc->btcoex;
struct ath_hw *ah = sc->sc_ah;
if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_3WIRE &&
ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI)
return;
ath_dbg(ath9k_hw_common(ah), BTCOEX, "Stopping btcoex timers\n");
del_timer_sync(&btcoex->period_timer); del_timer_sync(&btcoex->period_timer);
del_timer_sync(&btcoex->no_stomp_timer); del_timer_sync(&btcoex->no_stomp_timer);
@ -356,33 +361,33 @@ void ath9k_start_btcoex(struct ath_softc *sc)
{ {
struct ath_hw *ah = sc->sc_ah; struct ath_hw *ah = sc->sc_ah;
if ((ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) && if (ah->btcoex_hw.enabled ||
!ah->btcoex_hw.enabled) { ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI)) return;
ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
AR_STOMP_LOW_WLAN_WGHT, 0);
else
ath9k_hw_btcoex_set_weight(ah, 0, 0,
ATH_BTCOEX_STOMP_NONE);
ath9k_hw_btcoex_enable(ah);
if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
ath9k_btcoex_timer_resume(sc); ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
} AR_STOMP_LOW_WLAN_WGHT, 0);
else
ath9k_hw_btcoex_set_weight(ah, 0, 0,
ATH_BTCOEX_STOMP_NONE);
ath9k_hw_btcoex_enable(ah);
ath9k_btcoex_timer_resume(sc);
} }
void ath9k_stop_btcoex(struct ath_softc *sc) void ath9k_stop_btcoex(struct ath_softc *sc)
{ {
struct ath_hw *ah = sc->sc_ah; struct ath_hw *ah = sc->sc_ah;
if (ah->btcoex_hw.enabled && if (!ah->btcoex_hw.enabled ||
ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) { ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) return;
ath9k_btcoex_timer_pause(sc);
ath9k_hw_btcoex_disable(ah); ath9k_btcoex_timer_pause(sc);
if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) ath9k_hw_btcoex_disable(ah);
ath_mci_flush_profile(&sc->btcoex.mci);
} if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
ath_mci_flush_profile(&sc->btcoex.mci);
} }
void ath9k_deinit_btcoex(struct ath_softc *sc) void ath9k_deinit_btcoex(struct ath_softc *sc)
@ -409,22 +414,20 @@ int ath9k_init_btcoex(struct ath_softc *sc)
break; break;
case ATH_BTCOEX_CFG_3WIRE: case ATH_BTCOEX_CFG_3WIRE:
ath9k_hw_btcoex_init_3wire(sc->sc_ah); ath9k_hw_btcoex_init_3wire(sc->sc_ah);
r = ath_init_btcoex_timer(sc); ath_init_btcoex_timer(sc);
if (r)
return -1;
txq = sc->tx.txq_map[IEEE80211_AC_BE]; txq = sc->tx.txq_map[IEEE80211_AC_BE];
ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum); ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW; break;
if (ath9k_hw_mci_is_enabled(ah)) { case ATH_BTCOEX_CFG_MCI:
sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE; ath_init_btcoex_timer(sc);
INIT_LIST_HEAD(&sc->btcoex.mci.info);
r = ath_mci_setup(sc); sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
if (r) INIT_LIST_HEAD(&sc->btcoex.mci.info);
return r; ath9k_hw_btcoex_init_mci(ah);
ath9k_hw_btcoex_init_mci(ah); r = ath_mci_setup(sc);
} if (r)
return r;
break; break;
default: default:

View file

@ -40,6 +40,7 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
{ USB_DEVICE(0x0cf3, 0xb003) }, /* Ubiquiti WifiStation Ext */ { USB_DEVICE(0x0cf3, 0xb003) }, /* Ubiquiti WifiStation Ext */
{ USB_DEVICE(0x0cf3, 0xb002) }, /* Ubiquiti WifiStation */ { USB_DEVICE(0x0cf3, 0xb002) }, /* Ubiquiti WifiStation */
{ USB_DEVICE(0x057c, 0x8403) }, /* AVM FRITZ!WLAN 11N v2 USB */ { USB_DEVICE(0x057c, 0x8403) }, /* AVM FRITZ!WLAN 11N v2 USB */
{ USB_DEVICE(0x0471, 0x209e) }, /* Philips (or NXP) PTA01 */
{ USB_DEVICE(0x0cf3, 0x7015), { USB_DEVICE(0x0cf3, 0x7015),
.driver_info = AR9287_USB }, /* Atheros */ .driver_info = AR9287_USB }, /* Atheros */

View file

@ -27,6 +27,7 @@
#include "eeprom.h" #include "eeprom.h"
#include "calib.h" #include "calib.h"
#include "reg.h" #include "reg.h"
#include "reg_mci.h"
#include "phy.h" #include "phy.h"
#include "btcoex.h" #include "btcoex.h"
#include "dynack.h" #include "dynack.h"

View file

@ -1172,6 +1172,38 @@ void ath9k_calculate_summary_state(struct ath_softc *sc,
ath9k_ps_restore(sc); ath9k_ps_restore(sc);
} }
static void ath9k_tpc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
int *power = (int *)data;
if (*power < vif->bss_conf.txpower)
*power = vif->bss_conf.txpower;
}
/* Called with sc->mutex held. */
void ath9k_set_txpower(struct ath_softc *sc, struct ieee80211_vif *vif)
{
int power;
struct ath_hw *ah = sc->sc_ah;
struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
ath9k_ps_wakeup(sc);
if (ah->tpc_enabled) {
power = (vif) ? vif->bss_conf.txpower : -1;
ieee80211_iterate_active_interfaces_atomic(
sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
ath9k_tpc_vif_iter, &power);
if (power == -1)
power = sc->hw->conf.power_level;
} else {
power = sc->hw->conf.power_level;
}
sc->cur_chan->txpower = 2 * power;
ath9k_hw_set_txpowerlimit(ah, sc->cur_chan->txpower, false);
sc->cur_chan->cur_txpower = reg->max_power_level;
ath9k_ps_restore(sc);
}
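ath9k_set_txpower() expects sc->mutex to be held, which is why the write_file_tpc() hunk earlier in this series takes the mutex before calling it. A short usage sketch under that assumption:

	/* Illustrative only: re-evaluate the TX power limit after a
	 * configuration change; passing NULL makes the iterator pick the
	 * highest txpower across all active vifs.
	 */
	mutex_lock(&sc->mutex);
	ath9k_set_txpower(sc, NULL);
	mutex_unlock(&sc->mutex);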
static void ath9k_assign_hw_queues(struct ieee80211_hw *hw, static void ath9k_assign_hw_queues(struct ieee80211_hw *hw,
struct ieee80211_vif *vif) struct ieee80211_vif *vif)
{ {
@ -1225,6 +1257,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
ath9k_assign_hw_queues(hw, vif); ath9k_assign_hw_queues(hw, vif);
ath9k_set_txpower(sc, vif);
an->sc = sc; an->sc = sc;
an->sta = NULL; an->sta = NULL;
an->vif = vif; an->vif = vif;
@ -1265,6 +1299,8 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
ath9k_assign_hw_queues(hw, vif); ath9k_assign_hw_queues(hw, vif);
ath9k_calculate_summary_state(sc, avp->chanctx); ath9k_calculate_summary_state(sc, avp->chanctx);
ath9k_set_txpower(sc, vif);
mutex_unlock(&sc->mutex); mutex_unlock(&sc->mutex);
return 0; return 0;
} }
@ -1294,6 +1330,8 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
ath9k_calculate_summary_state(sc, avp->chanctx); ath9k_calculate_summary_state(sc, avp->chanctx);
ath9k_set_txpower(sc, NULL);
mutex_unlock(&sc->mutex); mutex_unlock(&sc->mutex);
} }
@ -1397,14 +1435,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
ath_chanctx_set_channel(sc, ctx, &hw->conf.chandef); ath_chanctx_set_channel(sc, ctx, &hw->conf.chandef);
} }
if (changed & IEEE80211_CONF_CHANGE_POWER) {
ath_dbg(common, CONFIG, "Set power: %d\n", conf->power_level);
sc->cur_chan->txpower = 2 * conf->power_level;
ath9k_cmn_update_txpow(ah, sc->cur_chan->cur_txpower,
sc->cur_chan->txpower,
&sc->cur_chan->cur_txpower);
}
mutex_unlock(&sc->mutex); mutex_unlock(&sc->mutex);
ath9k_ps_restore(sc); ath9k_ps_restore(sc);
@ -1764,6 +1794,12 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
if (changed & CHECK_ANI) if (changed & CHECK_ANI)
ath_check_ani(sc); ath_check_ani(sc);
if (changed & BSS_CHANGED_TXPOWER) {
ath_dbg(common, CONFIG, "vif %pM power %d dbm power_type %d\n",
vif->addr, bss_conf->txpower, bss_conf->txpower_type);
ath9k_set_txpower(sc, vif);
}
mutex_unlock(&sc->mutex); mutex_unlock(&sc->mutex);
ath9k_ps_restore(sc); ath9k_ps_restore(sc);

View file

@ -495,7 +495,7 @@ void ath_mci_intr(struct ath_softc *sc)
ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg); ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg);
if (ar9003_mci_state(ah, MCI_STATE_ENABLE) == 0) { if (ar9003_mci_state(ah, MCI_STATE_ENABLE) == 0) {
ar9003_mci_get_next_gpm_offset(ah, true, NULL); ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET);
return; return;
} }
@ -559,8 +559,7 @@ void ath_mci_intr(struct ath_softc *sc)
return; return;
pgpm = mci->gpm_buf.bf_addr; pgpm = mci->gpm_buf.bf_addr;
offset = ar9003_mci_get_next_gpm_offset(ah, false, offset = ar9003_mci_get_next_gpm_offset(ah, &more_data);
&more_data);
if (offset == MCI_GPM_INVALID) if (offset == MCI_GPM_INVALID)
break; break;
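With the "first" flag removed from ar9003_mci_get_next_gpm_offset(), priming the GPM read index is now an explicit state query. The new MCI_STATE_INIT_GPM_OFFSET case boils down to the sketch below (same register layout as in reg_mci.h):

	/* Illustrative only: latch the hardware GPM write pointer as the
	 * software read index, wrapping to 0 if it is out of range.
	 */
	u32 ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);

	mci->gpm_idx = (ptr < mci->gpm_len) ? ptr : 0;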

View file

@ -2044,279 +2044,4 @@ enum {
#define AR_PHY_AGC_CONTROL_YCOK_MAX 0x000003c0 #define AR_PHY_AGC_CONTROL_YCOK_MAX 0x000003c0
#define AR_PHY_AGC_CONTROL_YCOK_MAX_S 6 #define AR_PHY_AGC_CONTROL_YCOK_MAX_S 6
/* MCI Registers */
#define AR_MCI_COMMAND0 0x1800
#define AR_MCI_COMMAND0_HEADER 0xFF
#define AR_MCI_COMMAND0_HEADER_S 0
#define AR_MCI_COMMAND0_LEN 0x1f00
#define AR_MCI_COMMAND0_LEN_S 8
#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP 0x2000
#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP_S 13
#define AR_MCI_COMMAND1 0x1804
#define AR_MCI_COMMAND2 0x1808
#define AR_MCI_COMMAND2_RESET_TX 0x01
#define AR_MCI_COMMAND2_RESET_TX_S 0
#define AR_MCI_COMMAND2_RESET_RX 0x02
#define AR_MCI_COMMAND2_RESET_RX_S 1
#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES 0x3FC
#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES_S 2
#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP 0x400
#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP_S 10
#define AR_MCI_RX_CTRL 0x180c
#define AR_MCI_TX_CTRL 0x1810
/* 0 = no division, 1 = divide by 2, 2 = divide by 4, 3 = divide by 8 */
#define AR_MCI_TX_CTRL_CLK_DIV 0x03
#define AR_MCI_TX_CTRL_CLK_DIV_S 0
#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE 0x04
#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE_S 2
#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ 0xFFFFF8
#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ_S 3
#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM 0xF000000
#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM_S 24
#define AR_MCI_MSG_ATTRIBUTES_TABLE 0x1814
#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM 0xFFFF
#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM_S 0
#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR 0xFFFF0000
#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR_S 16
#define AR_MCI_SCHD_TABLE_0 0x1818
#define AR_MCI_SCHD_TABLE_1 0x181c
#define AR_MCI_GPM_0 0x1820
#define AR_MCI_GPM_1 0x1824
#define AR_MCI_GPM_WRITE_PTR 0xFFFF0000
#define AR_MCI_GPM_WRITE_PTR_S 16
#define AR_MCI_GPM_BUF_LEN 0x0000FFFF
#define AR_MCI_GPM_BUF_LEN_S 0
#define AR_MCI_INTERRUPT_RAW 0x1828
#define AR_MCI_INTERRUPT_EN 0x182c
#define AR_MCI_INTERRUPT_SW_MSG_DONE 0x00000001
#define AR_MCI_INTERRUPT_SW_MSG_DONE_S 0
#define AR_MCI_INTERRUPT_CPU_INT_MSG 0x00000002
#define AR_MCI_INTERRUPT_CPU_INT_MSG_S 1
#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL 0x00000004
#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL_S 2
#define AR_MCI_INTERRUPT_RX_INVALID_HDR 0x00000008
#define AR_MCI_INTERRUPT_RX_INVALID_HDR_S 3
#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL 0x00000010
#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL_S 4
#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL 0x00000020
#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL_S 5
#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL 0x00000080
#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL_S 7
#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL 0x00000100
#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL_S 8
#define AR_MCI_INTERRUPT_RX_MSG 0x00000200
#define AR_MCI_INTERRUPT_RX_MSG_S 9
#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE 0x00000400
#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE_S 10
#define AR_MCI_INTERRUPT_BT_PRI 0x07fff800
#define AR_MCI_INTERRUPT_BT_PRI_S 11
#define AR_MCI_INTERRUPT_BT_PRI_THRESH 0x08000000
#define AR_MCI_INTERRUPT_BT_PRI_THRESH_S 27
#define AR_MCI_INTERRUPT_BT_FREQ 0x10000000
#define AR_MCI_INTERRUPT_BT_FREQ_S 28
#define AR_MCI_INTERRUPT_BT_STOMP 0x20000000
#define AR_MCI_INTERRUPT_BT_STOMP_S 29
#define AR_MCI_INTERRUPT_BB_AIC_IRQ 0x40000000
#define AR_MCI_INTERRUPT_BB_AIC_IRQ_S 30
#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT 0x80000000
#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT_S 31
#define AR_MCI_INTERRUPT_DEFAULT (AR_MCI_INTERRUPT_SW_MSG_DONE | \
AR_MCI_INTERRUPT_RX_INVALID_HDR | \
AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \
AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \
AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \
AR_MCI_INTERRUPT_TX_SW_MSG_FAIL | \
AR_MCI_INTERRUPT_RX_MSG | \
AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE | \
AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)
#define AR_MCI_INTERRUPT_MSG_FAIL_MASK (AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \
AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \
AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \
AR_MCI_INTERRUPT_TX_SW_MSG_FAIL)
#define AR_MCI_REMOTE_CPU_INT 0x1830
#define AR_MCI_REMOTE_CPU_INT_EN 0x1834
#define AR_MCI_INTERRUPT_RX_MSG_RAW 0x1838
#define AR_MCI_INTERRUPT_RX_MSG_EN 0x183c
#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET 0x00000001
#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S 0
#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL 0x00000002
#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S 1
#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK 0x00000004
#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S 2
#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO 0x00000008
#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S 3
#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST 0x00000010
#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S 4
#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO 0x00000020
#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S 5
#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT 0x00000040
#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S 6
#define AR_MCI_INTERRUPT_RX_MSG_GPM 0x00000100
#define AR_MCI_INTERRUPT_RX_MSG_GPM_S 8
#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO 0x00000200
#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S 9
#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING 0x00000400
#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S 10
#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING 0x00000800
#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S 11
#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE 0x00001000
#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S 12
#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \
AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL| \
AR_MCI_INTERRUPT_RX_MSG_LNA_INFO | \
AR_MCI_INTERRUPT_RX_MSG_CONT_NACK | \
AR_MCI_INTERRUPT_RX_MSG_CONT_INFO | \
AR_MCI_INTERRUPT_RX_MSG_CONT_RST)
#define AR_MCI_INTERRUPT_RX_MSG_DEFAULT (AR_MCI_INTERRUPT_RX_MSG_GPM | \
AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET| \
AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING | \
AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING| \
AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
#define AR_MCI_CPU_INT 0x1840
#define AR_MCI_RX_STATUS 0x1844
#define AR_MCI_RX_LAST_SCHD_MSG_INDEX 0x00000F00
#define AR_MCI_RX_LAST_SCHD_MSG_INDEX_S 8
#define AR_MCI_RX_REMOTE_SLEEP 0x00001000
#define AR_MCI_RX_REMOTE_SLEEP_S 12
#define AR_MCI_RX_MCI_CLK_REQ 0x00002000
#define AR_MCI_RX_MCI_CLK_REQ_S 13
#define AR_MCI_CONT_STATUS 0x1848
#define AR_MCI_CONT_RSSI_POWER 0x000000FF
#define AR_MCI_CONT_RSSI_POWER_S 0
#define AR_MCI_CONT_PRIORITY 0x0000FF00
#define AR_MCI_CONT_PRIORITY_S 8
#define AR_MCI_CONT_TXRX 0x00010000
#define AR_MCI_CONT_TXRX_S 16
#define AR_MCI_BT_PRI0 0x184c
#define AR_MCI_BT_PRI1 0x1850
#define AR_MCI_BT_PRI2 0x1854
#define AR_MCI_BT_PRI3 0x1858
#define AR_MCI_BT_PRI 0x185c
#define AR_MCI_WL_FREQ0 0x1860
#define AR_MCI_WL_FREQ1 0x1864
#define AR_MCI_WL_FREQ2 0x1868
#define AR_MCI_GAIN 0x186c
#define AR_MCI_WBTIMER1 0x1870
#define AR_MCI_WBTIMER2 0x1874
#define AR_MCI_WBTIMER3 0x1878
#define AR_MCI_WBTIMER4 0x187c
#define AR_MCI_MAXGAIN 0x1880
#define AR_MCI_HW_SCHD_TBL_CTL 0x1884
#define AR_MCI_HW_SCHD_TBL_D0 0x1888
#define AR_MCI_HW_SCHD_TBL_D1 0x188c
#define AR_MCI_HW_SCHD_TBL_D2 0x1890
#define AR_MCI_HW_SCHD_TBL_D3 0x1894
#define AR_MCI_TX_PAYLOAD0 0x1898
#define AR_MCI_TX_PAYLOAD1 0x189c
#define AR_MCI_TX_PAYLOAD2 0x18a0
#define AR_MCI_TX_PAYLOAD3 0x18a4
#define AR_BTCOEX_WBTIMER 0x18a8
#define AR_BTCOEX_CTRL 0x18ac
#define AR_BTCOEX_CTRL_AR9462_MODE 0x00000001
#define AR_BTCOEX_CTRL_AR9462_MODE_S 0
#define AR_BTCOEX_CTRL_WBTIMER_EN 0x00000002
#define AR_BTCOEX_CTRL_WBTIMER_EN_S 1
#define AR_BTCOEX_CTRL_MCI_MODE_EN 0x00000004
#define AR_BTCOEX_CTRL_MCI_MODE_EN_S 2
#define AR_BTCOEX_CTRL_LNA_SHARED 0x00000008
#define AR_BTCOEX_CTRL_LNA_SHARED_S 3
#define AR_BTCOEX_CTRL_PA_SHARED 0x00000010
#define AR_BTCOEX_CTRL_PA_SHARED_S 4
#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN 0x00000020
#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN_S 5
#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN 0x00000040
#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN_S 6
#define AR_BTCOEX_CTRL_NUM_ANTENNAS 0x00000180
#define AR_BTCOEX_CTRL_NUM_ANTENNAS_S 7
#define AR_BTCOEX_CTRL_RX_CHAIN_MASK 0x00000E00
#define AR_BTCOEX_CTRL_RX_CHAIN_MASK_S 9
#define AR_BTCOEX_CTRL_AGGR_THRESH 0x00007000
#define AR_BTCOEX_CTRL_AGGR_THRESH_S 12
#define AR_BTCOEX_CTRL_1_CHAIN_BCN 0x00080000
#define AR_BTCOEX_CTRL_1_CHAIN_BCN_S 19
#define AR_BTCOEX_CTRL_1_CHAIN_ACK 0x00100000
#define AR_BTCOEX_CTRL_1_CHAIN_ACK_S 20
#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN 0x1FE00000
#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN_S 28
#define AR_BTCOEX_CTRL_REDUCE_TXPWR 0x20000000
#define AR_BTCOEX_CTRL_REDUCE_TXPWR_S 29
#define AR_BTCOEX_CTRL_SPDT_ENABLE_10 0x40000000
#define AR_BTCOEX_CTRL_SPDT_ENABLE_10_S 30
#define AR_BTCOEX_CTRL_SPDT_POLARITY 0x80000000
#define AR_BTCOEX_CTRL_SPDT_POLARITY_S 31
#define AR_BTCOEX_MAX_TXPWR(_x) (0x18c0 + ((_x) << 2))
#define AR_BTCOEX_WL_LNA 0x1940
#define AR_BTCOEX_RFGAIN_CTRL 0x1944
#define AR_BTCOEX_WL_LNA_TIMEOUT 0x003FFFFF
#define AR_BTCOEX_WL_LNA_TIMEOUT_S 0
#define AR_BTCOEX_CTRL2 0x1948
#define AR_BTCOEX_CTRL2_TXPWR_THRESH 0x0007F800
#define AR_BTCOEX_CTRL2_TXPWR_THRESH_S 11
#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK 0x00380000
#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK_S 19
#define AR_BTCOEX_CTRL2_RX_DEWEIGHT 0x00400000
#define AR_BTCOEX_CTRL2_RX_DEWEIGHT_S 22
#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL 0x00800000
#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL_S 23
#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL 0x01000000
#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL_S 24
#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE 0x02000000
#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE_S 25
#define AR_BTCOEX_CTRL_SPDT_ENABLE 0x00000001
#define AR_BTCOEX_CTRL_SPDT_ENABLE_S 0
#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL 0x00000002
#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL_S 1
#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT 0x00000004
#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT_S 2
#define AR_GLB_WLAN_UART_INTF_EN 0x00020000
#define AR_GLB_WLAN_UART_INTF_EN_S 17
#define AR_GLB_DS_JTAG_DISABLE 0x00040000
#define AR_GLB_DS_JTAG_DISABLE_S 18
#define AR_BTCOEX_RC 0x194c
#define AR_BTCOEX_MAX_RFGAIN(_x) (0x1950 + ((_x) << 2))
#define AR_BTCOEX_DBG 0x1a50
#define AR_MCI_LAST_HW_MSG_HDR 0x1a54
#define AR_MCI_LAST_HW_MSG_BDY 0x1a58
#define AR_MCI_SCHD_TABLE_2 0x1a5c
#define AR_MCI_SCHD_TABLE_2_MEM_BASED 0x00000001
#define AR_MCI_SCHD_TABLE_2_MEM_BASED_S 0
#define AR_MCI_SCHD_TABLE_2_HW_BASED 0x00000002
#define AR_MCI_SCHD_TABLE_2_HW_BASED_S 1
#define AR_BTCOEX_CTRL3 0x1a60
#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT 0x00000fff
#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT_S 0
#define AR_GLB_SWREG_DISCONT_MODE 0x2002c
#define AR_GLB_SWREG_DISCONT_EN_BT_WLAN 0x3
#define AR_MCI_MISC 0x1a74
#define AR_MCI_MISC_HW_FIX_EN 0x00000001
#define AR_MCI_MISC_HW_FIX_EN_S 0
#define AR_MCI_DBG_CNT_CTRL 0x1a78
#define AR_MCI_DBG_CNT_CTRL_ENABLE 0x00000001
#define AR_MCI_DBG_CNT_CTRL_ENABLE_S 0
#endif #endif

View file

@ -0,0 +1,310 @@
/*
* Copyright (c) 2015 Qualcomm Atheros Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef REG_MCI_H
#define REG_MCI_H
#define AR_MCI_COMMAND0 0x1800
#define AR_MCI_COMMAND0_HEADER 0xFF
#define AR_MCI_COMMAND0_HEADER_S 0
#define AR_MCI_COMMAND0_LEN 0x1f00
#define AR_MCI_COMMAND0_LEN_S 8
#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP 0x2000
#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP_S 13
#define AR_MCI_COMMAND1 0x1804
#define AR_MCI_COMMAND2 0x1808
#define AR_MCI_COMMAND2_RESET_TX 0x01
#define AR_MCI_COMMAND2_RESET_TX_S 0
#define AR_MCI_COMMAND2_RESET_RX 0x02
#define AR_MCI_COMMAND2_RESET_RX_S 1
#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES 0x3FC
#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES_S 2
#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP 0x400
#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP_S 10
#define AR_MCI_RX_CTRL 0x180c
#define AR_MCI_TX_CTRL 0x1810
/*
* 0 = no division,
* 1 = divide by 2,
* 2 = divide by 4,
* 3 = divide by 8
*/
#define AR_MCI_TX_CTRL_CLK_DIV 0x03
#define AR_MCI_TX_CTRL_CLK_DIV_S 0
#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE 0x04
#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE_S 2
#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ 0xFFFFF8
#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ_S 3
#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM 0xF000000
#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM_S 24
#define AR_MCI_MSG_ATTRIBUTES_TABLE 0x1814
#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM 0xFFFF
#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM_S 0
#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR 0xFFFF0000
#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR_S 16
#define AR_MCI_SCHD_TABLE_0 0x1818
#define AR_MCI_SCHD_TABLE_1 0x181c
#define AR_MCI_GPM_0 0x1820
#define AR_MCI_GPM_1 0x1824
#define AR_MCI_GPM_WRITE_PTR 0xFFFF0000
#define AR_MCI_GPM_WRITE_PTR_S 16
#define AR_MCI_GPM_BUF_LEN 0x0000FFFF
#define AR_MCI_GPM_BUF_LEN_S 0
#define AR_MCI_INTERRUPT_RAW 0x1828
#define AR_MCI_INTERRUPT_EN 0x182c
#define AR_MCI_INTERRUPT_SW_MSG_DONE 0x00000001
#define AR_MCI_INTERRUPT_SW_MSG_DONE_S 0
#define AR_MCI_INTERRUPT_CPU_INT_MSG 0x00000002
#define AR_MCI_INTERRUPT_CPU_INT_MSG_S 1
#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL 0x00000004
#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL_S 2
#define AR_MCI_INTERRUPT_RX_INVALID_HDR 0x00000008
#define AR_MCI_INTERRUPT_RX_INVALID_HDR_S 3
#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL 0x00000010
#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL_S 4
#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL 0x00000020
#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL_S 5
#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL 0x00000080
#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL_S 7
#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL 0x00000100
#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL_S 8
#define AR_MCI_INTERRUPT_RX_MSG 0x00000200
#define AR_MCI_INTERRUPT_RX_MSG_S 9
#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE 0x00000400
#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE_S 10
#define AR_MCI_INTERRUPT_BT_PRI 0x07fff800
#define AR_MCI_INTERRUPT_BT_PRI_S 11
#define AR_MCI_INTERRUPT_BT_PRI_THRESH 0x08000000
#define AR_MCI_INTERRUPT_BT_PRI_THRESH_S 27
#define AR_MCI_INTERRUPT_BT_FREQ 0x10000000
#define AR_MCI_INTERRUPT_BT_FREQ_S 28
#define AR_MCI_INTERRUPT_BT_STOMP 0x20000000
#define AR_MCI_INTERRUPT_BT_STOMP_S 29
#define AR_MCI_INTERRUPT_BB_AIC_IRQ 0x40000000
#define AR_MCI_INTERRUPT_BB_AIC_IRQ_S 30
#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT 0x80000000
#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT_S 31
#define AR_MCI_REMOTE_CPU_INT 0x1830
#define AR_MCI_REMOTE_CPU_INT_EN 0x1834
#define AR_MCI_INTERRUPT_RX_MSG_RAW 0x1838
#define AR_MCI_INTERRUPT_RX_MSG_EN 0x183c
#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET 0x00000001
#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S 0
#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL 0x00000002
#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S 1
#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK 0x00000004
#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S 2
#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO 0x00000008
#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S 3
#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST 0x00000010
#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S 4
#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO 0x00000020
#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S 5
#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT 0x00000040
#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S 6
#define AR_MCI_INTERRUPT_RX_MSG_GPM 0x00000100
#define AR_MCI_INTERRUPT_RX_MSG_GPM_S 8
#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO 0x00000200
#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S 9
#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING 0x00000400
#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S 10
#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING 0x00000800
#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S 11
#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE 0x00001000
#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S 12
#define AR_MCI_CPU_INT 0x1840
#define AR_MCI_RX_STATUS 0x1844
#define AR_MCI_RX_LAST_SCHD_MSG_INDEX 0x00000F00
#define AR_MCI_RX_LAST_SCHD_MSG_INDEX_S 8
#define AR_MCI_RX_REMOTE_SLEEP 0x00001000
#define AR_MCI_RX_REMOTE_SLEEP_S 12
#define AR_MCI_RX_MCI_CLK_REQ 0x00002000
#define AR_MCI_RX_MCI_CLK_REQ_S 13
#define AR_MCI_CONT_STATUS 0x1848
#define AR_MCI_CONT_RSSI_POWER 0x000000FF
#define AR_MCI_CONT_RSSI_POWER_S 0
#define AR_MCI_CONT_PRIORITY 0x0000FF00
#define AR_MCI_CONT_PRIORITY_S 8
#define AR_MCI_CONT_TXRX 0x00010000
#define AR_MCI_CONT_TXRX_S 16
#define AR_MCI_BT_PRI0 0x184c
#define AR_MCI_BT_PRI1 0x1850
#define AR_MCI_BT_PRI2 0x1854
#define AR_MCI_BT_PRI3 0x1858
#define AR_MCI_BT_PRI 0x185c
#define AR_MCI_WL_FREQ0 0x1860
#define AR_MCI_WL_FREQ1 0x1864
#define AR_MCI_WL_FREQ2 0x1868
#define AR_MCI_GAIN 0x186c
#define AR_MCI_WBTIMER1 0x1870
#define AR_MCI_WBTIMER2 0x1874
#define AR_MCI_WBTIMER3 0x1878
#define AR_MCI_WBTIMER4 0x187c
#define AR_MCI_MAXGAIN 0x1880
#define AR_MCI_HW_SCHD_TBL_CTL 0x1884
#define AR_MCI_HW_SCHD_TBL_D0 0x1888
#define AR_MCI_HW_SCHD_TBL_D1 0x188c
#define AR_MCI_HW_SCHD_TBL_D2 0x1890
#define AR_MCI_HW_SCHD_TBL_D3 0x1894
#define AR_MCI_TX_PAYLOAD0 0x1898
#define AR_MCI_TX_PAYLOAD1 0x189c
#define AR_MCI_TX_PAYLOAD2 0x18a0
#define AR_MCI_TX_PAYLOAD3 0x18a4
#define AR_BTCOEX_WBTIMER 0x18a8
#define AR_BTCOEX_CTRL 0x18ac
#define AR_BTCOEX_CTRL_AR9462_MODE 0x00000001
#define AR_BTCOEX_CTRL_AR9462_MODE_S 0
#define AR_BTCOEX_CTRL_WBTIMER_EN 0x00000002
#define AR_BTCOEX_CTRL_WBTIMER_EN_S 1
#define AR_BTCOEX_CTRL_MCI_MODE_EN 0x00000004
#define AR_BTCOEX_CTRL_MCI_MODE_EN_S 2
#define AR_BTCOEX_CTRL_LNA_SHARED 0x00000008
#define AR_BTCOEX_CTRL_LNA_SHARED_S 3
#define AR_BTCOEX_CTRL_PA_SHARED 0x00000010
#define AR_BTCOEX_CTRL_PA_SHARED_S 4
#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN 0x00000020
#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN_S 5
#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN 0x00000040
#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN_S 6
#define AR_BTCOEX_CTRL_NUM_ANTENNAS 0x00000180
#define AR_BTCOEX_CTRL_NUM_ANTENNAS_S 7
#define AR_BTCOEX_CTRL_RX_CHAIN_MASK 0x00000E00
#define AR_BTCOEX_CTRL_RX_CHAIN_MASK_S 9
#define AR_BTCOEX_CTRL_AGGR_THRESH 0x00007000
#define AR_BTCOEX_CTRL_AGGR_THRESH_S 12
#define AR_BTCOEX_CTRL_1_CHAIN_BCN 0x00080000
#define AR_BTCOEX_CTRL_1_CHAIN_BCN_S 19
#define AR_BTCOEX_CTRL_1_CHAIN_ACK 0x00100000
#define AR_BTCOEX_CTRL_1_CHAIN_ACK_S 20
#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN 0x1FE00000
#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN_S 28
#define AR_BTCOEX_CTRL_REDUCE_TXPWR 0x20000000
#define AR_BTCOEX_CTRL_REDUCE_TXPWR_S 29
#define AR_BTCOEX_CTRL_SPDT_ENABLE_10 0x40000000
#define AR_BTCOEX_CTRL_SPDT_ENABLE_10_S 30
#define AR_BTCOEX_CTRL_SPDT_POLARITY 0x80000000
#define AR_BTCOEX_CTRL_SPDT_POLARITY_S 31
#define AR_BTCOEX_WL_WEIGHTS0 0x18b0
#define AR_BTCOEX_WL_WEIGHTS1 0x18b4
#define AR_BTCOEX_WL_WEIGHTS2 0x18b8
#define AR_BTCOEX_WL_WEIGHTS3 0x18bc
#define AR_BTCOEX_MAX_TXPWR(_x) (0x18c0 + ((_x) << 2))
#define AR_BTCOEX_WL_LNA 0x1940
#define AR_BTCOEX_RFGAIN_CTRL 0x1944
#define AR_BTCOEX_WL_LNA_TIMEOUT 0x003FFFFF
#define AR_BTCOEX_WL_LNA_TIMEOUT_S 0
#define AR_BTCOEX_CTRL2 0x1948
#define AR_BTCOEX_CTRL2_TXPWR_THRESH 0x0007F800
#define AR_BTCOEX_CTRL2_TXPWR_THRESH_S 11
#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK 0x00380000
#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK_S 19
#define AR_BTCOEX_CTRL2_RX_DEWEIGHT 0x00400000
#define AR_BTCOEX_CTRL2_RX_DEWEIGHT_S 22
#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL 0x00800000
#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL_S 23
#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL 0x01000000
#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL_S 24
#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE 0x02000000
#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE_S 25
#define AR_BTCOEX_CTRL_SPDT_ENABLE 0x00000001
#define AR_BTCOEX_CTRL_SPDT_ENABLE_S 0
#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL 0x00000002
#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL_S 1
#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT 0x00000004
#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT_S 2
#define AR_GLB_WLAN_UART_INTF_EN 0x00020000
#define AR_GLB_WLAN_UART_INTF_EN_S 17
#define AR_GLB_DS_JTAG_DISABLE 0x00040000
#define AR_GLB_DS_JTAG_DISABLE_S 18
#define AR_BTCOEX_RC 0x194c
#define AR_BTCOEX_MAX_RFGAIN(_x) (0x1950 + ((_x) << 2))
#define AR_BTCOEX_DBG 0x1a50
#define AR_MCI_LAST_HW_MSG_HDR 0x1a54
#define AR_MCI_LAST_HW_MSG_BDY 0x1a58
#define AR_MCI_SCHD_TABLE_2 0x1a5c
#define AR_MCI_SCHD_TABLE_2_MEM_BASED 0x00000001
#define AR_MCI_SCHD_TABLE_2_MEM_BASED_S 0
#define AR_MCI_SCHD_TABLE_2_HW_BASED 0x00000002
#define AR_MCI_SCHD_TABLE_2_HW_BASED_S 1
#define AR_BTCOEX_CTRL3 0x1a60
#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT 0x00000fff
#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT_S 0
#define AR_GLB_SWREG_DISCONT_MODE 0x2002c
#define AR_GLB_SWREG_DISCONT_EN_BT_WLAN 0x3
#define AR_MCI_MISC 0x1a74
#define AR_MCI_MISC_HW_FIX_EN 0x00000001
#define AR_MCI_MISC_HW_FIX_EN_S 0
#define AR_MCI_DBG_CNT_CTRL 0x1a78
#define AR_MCI_DBG_CNT_CTRL_ENABLE 0x00000001
#define AR_MCI_DBG_CNT_CTRL_ENABLE_S 0
#define AR_MCI_DBG_CNT_CTRL_BT_LINKID 0x000007f8
#define AR_MCI_DBG_CNT_CTRL_BT_LINKID_S 3
#define MCI_STAT_ALL_BT_LINKID 0xffff
#define AR_MCI_INTERRUPT_DEFAULT (AR_MCI_INTERRUPT_SW_MSG_DONE | \
AR_MCI_INTERRUPT_RX_INVALID_HDR | \
AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \
AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \
AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \
AR_MCI_INTERRUPT_TX_SW_MSG_FAIL | \
AR_MCI_INTERRUPT_RX_MSG | \
AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE | \
AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)
#define AR_MCI_INTERRUPT_MSG_FAIL_MASK (AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \
AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \
AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \
AR_MCI_INTERRUPT_TX_SW_MSG_FAIL)
#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \
AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL | \
AR_MCI_INTERRUPT_RX_MSG_LNA_INFO | \
AR_MCI_INTERRUPT_RX_MSG_CONT_NACK | \
AR_MCI_INTERRUPT_RX_MSG_CONT_INFO | \
AR_MCI_INTERRUPT_RX_MSG_CONT_RST)
#define AR_MCI_INTERRUPT_RX_MSG_DEFAULT (AR_MCI_INTERRUPT_RX_MSG_GPM | \
AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET | \
AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING | \
AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING | \
AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
#endif /* REG_MCI_H */

View file

@ -72,7 +72,7 @@
#define AR_WOW_MAC_INTR_EN 0x00040000 #define AR_WOW_MAC_INTR_EN 0x00040000
#define AR_WOW_MAGIC_EN 0x00010000 #define AR_WOW_MAGIC_EN 0x00010000
#define AR_WOW_PATTERN_EN(x) (x & 0xff) #define AR_WOW_PATTERN_EN(x) (x & 0xff)
#define AR_WOW_PAT_FOUND_SHIFT 8 #define AR_WOW_PAT_FOUND_SHIFT 8
#define AR_WOW_PATTERN_FOUND(x) (x & (0xff << AR_WOW_PAT_FOUND_SHIFT)) #define AR_WOW_PATTERN_FOUND(x) (x & (0xff << AR_WOW_PAT_FOUND_SHIFT))
#define AR_WOW_PATTERN_FOUND_MASK ((0xff) << AR_WOW_PAT_FOUND_SHIFT) #define AR_WOW_PATTERN_FOUND_MASK ((0xff) << AR_WOW_PAT_FOUND_SHIFT)
#define AR_WOW_MAGIC_PAT_FOUND 0x00020000 #define AR_WOW_MAGIC_PAT_FOUND 0x00020000
@ -90,6 +90,14 @@
AR_WOW_BEACON_FAIL | \ AR_WOW_BEACON_FAIL | \
AR_WOW_KEEP_ALIVE_FAIL)) AR_WOW_KEEP_ALIVE_FAIL))
#define AR_WOW2_PATTERN_EN(x) ((x & 0xff) << 0)
#define AR_WOW2_PATTERN_FOUND_SHIFT 8
#define AR_WOW2_PATTERN_FOUND(x) (x & (0xff << AR_WOW2_PATTERN_FOUND_SHIFT))
#define AR_WOW2_PATTERN_FOUND_MASK ((0xff) << AR_WOW2_PATTERN_FOUND_SHIFT)
#define AR_WOW_STATUS2(x) (x & AR_WOW2_PATTERN_FOUND_MASK)
#define AR_WOW_CLEAR_EVENTS2(x) (x & ~(AR_WOW2_PATTERN_EN(0xff)))
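These macros mirror the primary AR_WOW_PATTERN helpers for the additional pattern bank; the wakeup path added above reads AR_MAC_PCU_WOW4 with them. A condensed sketch of that decode:

	/* Illustrative only: report a wake-up caused by one of the extra
	 * user patterns, masked by the events actually enabled.
	 */
	u32 rval = REG_READ(ah, AR_MAC_PCU_WOW4);
	u32 val = AR_WOW_STATUS2(rval) & ah->wow.wow_event_mask2;

	if (AR_WOW2_PATTERN_FOUND(val))
		wow_status |= AH_WOW_USER_PATTERN_EN;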
#define AR_WOW_AIFS_CNT(x) (x & 0xff) #define AR_WOW_AIFS_CNT(x) (x & 0xff)
#define AR_WOW_SLOT_CNT(x) ((x & 0xff) << 8) #define AR_WOW_SLOT_CNT(x) ((x & 0xff) << 8)
#define AR_WOW_KEEP_ALIVE_CNT(x) ((x & 0xff) << 16) #define AR_WOW_KEEP_ALIVE_CNT(x) ((x & 0xff) << 16)

View file

@ -1103,14 +1103,28 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf,
struct sk_buff *skb; struct sk_buff *skb;
struct ath_frame_info *fi; struct ath_frame_info *fi;
struct ieee80211_tx_info *info; struct ieee80211_tx_info *info;
struct ieee80211_vif *vif;
struct ath_hw *ah = sc->sc_ah; struct ath_hw *ah = sc->sc_ah;
if (sc->tx99_state || !ah->tpc_enabled) if (sc->tx99_state || !ah->tpc_enabled)
return MAX_RATE_POWER; return MAX_RATE_POWER;
skb = bf->bf_mpdu; skb = bf->bf_mpdu;
fi = get_frame_info(skb);
info = IEEE80211_SKB_CB(skb); info = IEEE80211_SKB_CB(skb);
vif = info->control.vif;
if (!vif) {
max_power = sc->cur_chan->cur_txpower;
goto out;
}
if (vif->bss_conf.txpower_type != NL80211_TX_POWER_LIMITED) {
max_power = min_t(u8, sc->cur_chan->cur_txpower,
2 * vif->bss_conf.txpower);
goto out;
}
fi = get_frame_info(skb);
if (!AR_SREV_9300_20_OR_LATER(ah)) { if (!AR_SREV_9300_20_OR_LATER(ah)) {
int txpower = fi->tx_power; int txpower = fi->tx_power;
@ -1147,25 +1161,25 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf,
txpower -= 2; txpower -= 2;
txpower = max(txpower, 0); txpower = max(txpower, 0);
max_power = min_t(u8, ah->tx_power[rateidx], txpower); max_power = min_t(u8, ah->tx_power[rateidx],
2 * vif->bss_conf.txpower);
/* XXX: clamp minimum TX power at 1 for AR9160 since if max_power = min_t(u8, max_power, txpower);
* max_power is set to 0, frames are transmitted at max
* TX power
*/
if (!max_power && !AR_SREV_9280_20_OR_LATER(ah))
max_power = 1;
} else if (!bf->bf_state.bfs_paprd) { } else if (!bf->bf_state.bfs_paprd) {
if (rateidx < 8 && (info->flags & IEEE80211_TX_CTL_STBC)) if (rateidx < 8 && (info->flags & IEEE80211_TX_CTL_STBC))
max_power = min(ah->tx_power_stbc[rateidx], max_power = min_t(u8, ah->tx_power_stbc[rateidx],
fi->tx_power); 2 * vif->bss_conf.txpower);
else else
max_power = min(ah->tx_power[rateidx], fi->tx_power); max_power = min_t(u8, ah->tx_power[rateidx],
2 * vif->bss_conf.txpower);
max_power = min(max_power, fi->tx_power);
} else { } else {
max_power = ah->paprd_training_power; max_power = ah->paprd_training_power;
} }
out:
return max_power; /* XXX: clamp minimum TX power at 1 for AR9160 since if max_power
* is set to 0, frames are transmitted at max TX power
*/
return (!max_power && !AR_SREV_9280_20_OR_LATER(ah)) ? 1 : max_power;
} }
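The per-packet cap is now bounded by the transmitting vif's BSS txpower as well, with all values kept in half-dB units. A hedged worked example (numbers invented for illustration):

	/* Illustrative only: a vif txpower of 10 dBm is 20 half-dB units,
	 * so with a per-rate table value of 34 the frame is capped at
	 * min(34, 20) = 20, i.e. 10 dBm.
	 */
	u8 cap = min_t(u8, ah->tx_power[rateidx], 2 * vif->bss_conf.txpower);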
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,

View file

@ -387,11 +387,25 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
int ch; int ch;
int rc = 0; int rc = 0;
wil_print_connect_params(wil, sme);
if (test_bit(wil_status_fwconnecting, wil->status) || if (test_bit(wil_status_fwconnecting, wil->status) ||
test_bit(wil_status_fwconnected, wil->status)) test_bit(wil_status_fwconnected, wil->status))
return -EALREADY; return -EALREADY;
wil_print_connect_params(wil, sme); if (sme->ie_len > WMI_MAX_IE_LEN) {
wil_err(wil, "IE too large (%td bytes)\n", sme->ie_len);
return -ERANGE;
}
rsn_eid = sme->ie ?
cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) :
NULL;
if (sme->privacy && !rsn_eid) {
wil_err(wil, "Missing RSN IE for secure connection\n");
return -EINVAL;
}
bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid, bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
sme->ssid, sme->ssid_len, sme->ssid, sme->ssid_len,
@ -407,17 +421,9 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
rc = -ENOENT; rc = -ENOENT;
goto out; goto out;
} }
wil->privacy = sme->privacy;
rsn_eid = sme->ie ? if (wil->privacy) {
cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) :
NULL;
if (rsn_eid) {
if (sme->ie_len > WMI_MAX_IE_LEN) {
rc = -ERANGE;
wil_err(wil, "IE too large (%td bytes)\n",
sme->ie_len);
goto out;
}
/* For secure assoc, send WMI_DELETE_CIPHER_KEY_CMD */ /* For secure assoc, send WMI_DELETE_CIPHER_KEY_CMD */
rc = wmi_del_cipher_key(wil, 0, bss->bssid); rc = wmi_del_cipher_key(wil, 0, bss->bssid);
if (rc) { if (rc) {
@ -450,7 +456,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
bss->capability); bss->capability);
goto out; goto out;
} }
if (rsn_eid) { if (wil->privacy) {
conn.dot11_auth_mode = WMI_AUTH11_SHARED; conn.dot11_auth_mode = WMI_AUTH11_SHARED;
conn.auth_mode = WMI_AUTH_WPA2_PSK; conn.auth_mode = WMI_AUTH_WPA2_PSK;
conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP; conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP;
@ -769,7 +775,7 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len, wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
bcon->assocresp_ies); bcon->assocresp_ies);
wil->secure_pcp = info->privacy; wil->privacy = info->privacy;
netif_carrier_on(ndev); netif_carrier_on(ndev);
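The connect path in this file now validates the request before it takes a reference on the BSS: an oversized IE buffer is rejected with -ERANGE, and a secure (privacy) connect without an RSN IE fails early with -EINVAL. Below is a standalone sketch of the same checks over a flat TLV buffer; names are hypothetical and WMI_MAX_IE_LEN is a placeholder (the driver itself relies on cfg80211_find_ie() for the lookup).

/* Sketch, not driver code: walk (id, length, payload) triplets and validate. */
#include <stddef.h>
#include <stdint.h>

#define EID_RSN		48
#define MAX_IE_LEN	497	/* placeholder for WMI_MAX_IE_LEN */

static const uint8_t *find_ie(uint8_t eid, const uint8_t *ies, size_t len)
{
	while (len >= 2 && len >= (size_t)(2 + ies[1])) {
		if (ies[0] == eid)
			return ies;
		len -= 2 + ies[1];
		ies += 2 + ies[1];
	}
	return NULL;
}

static int validate_connect_ies(const uint8_t *ies, size_t len, int privacy)
{
	if (len > MAX_IE_LEN)
		return -1;	/* -ERANGE in the driver */
	if (privacy && !find_ie(EID_RSN, ies, len))
		return -2;	/* -EINVAL: secure connect needs an RSN IE */
	return 0;
}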

View file

@ -29,6 +29,7 @@
static u32 mem_addr; static u32 mem_addr;
static u32 dbg_txdesc_index; static u32 dbg_txdesc_index;
static u32 dbg_vring_index; /* 24+ for Rx, 0..23 for Tx */ static u32 dbg_vring_index; /* 24+ for Rx, 0..23 for Tx */
u32 vring_idle_trsh = 16; /* HW fetches up to 16 descriptors at once */
enum dbg_off_type { enum dbg_off_type {
doff_u32 = 0, doff_u32 = 0,
@ -102,23 +103,30 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data)
% vring->size; % vring->size;
int avail = vring->size - used - 1; int avail = vring->size - used - 1;
char name[10]; char name[10];
char sidle[10];
/* performance monitoring */ /* performance monitoring */
cycles_t now = get_cycles(); cycles_t now = get_cycles();
uint64_t idle = txdata->idle * 100; uint64_t idle = txdata->idle * 100;
uint64_t total = now - txdata->begin; uint64_t total = now - txdata->begin;
do_div(idle, total); if (total != 0) {
do_div(idle, total);
snprintf(sidle, sizeof(sidle), "%3d%%",
(int)idle);
} else {
snprintf(sidle, sizeof(sidle), "N/A");
}
txdata->begin = now; txdata->begin = now;
txdata->idle = 0ULL; txdata->idle = 0ULL;
snprintf(name, sizeof(name), "tx_%2d", i); snprintf(name, sizeof(name), "tx_%2d", i);
seq_printf(s, seq_printf(s,
"\n%pM CID %d TID %d BACK([%d] %d TU A%s) [%3d|%3d] idle %3d%%\n", "\n%pM CID %d TID %d BACK([%d] %d TU A%s) [%3d|%3d] idle %s\n",
wil->sta[cid].addr, cid, tid, wil->sta[cid].addr, cid, tid,
txdata->agg_wsize, txdata->agg_timeout, txdata->agg_wsize, txdata->agg_timeout,
txdata->agg_amsdu ? "+" : "-", txdata->agg_amsdu ? "+" : "-",
used, avail, (int)idle); used, avail, sidle);
wil_print_vring(s, wil, name, vring, '_', 'H'); wil_print_vring(s, wil, name, vring, '_', 'H');
} }
@ -549,7 +557,7 @@ static ssize_t wil_write_file_reset(struct file *file, const char __user *buf,
dev_close(ndev); dev_close(ndev);
ndev->flags &= ~IFF_UP; ndev->flags &= ~IFF_UP;
rtnl_unlock(); rtnl_unlock();
wil_reset(wil); wil_reset(wil, true);
return len; return len;
} }
@ -618,7 +626,7 @@ static ssize_t wil_write_back(struct file *file, const char __user *buf,
struct wil6210_priv *wil = file->private_data; struct wil6210_priv *wil = file->private_data;
int rc; int rc;
char *kbuf = kmalloc(len + 1, GFP_KERNEL); char *kbuf = kmalloc(len + 1, GFP_KERNEL);
char cmd[8]; char cmd[9];
int p1, p2, p3; int p1, p2, p3;
if (!kbuf) if (!kbuf)
@ -1392,7 +1400,7 @@ static void wil6210_debugfs_init_isr(struct wil6210_priv *wil,
/* fields in struct wil6210_priv */ /* fields in struct wil6210_priv */
static const struct dbg_off dbg_wil_off[] = { static const struct dbg_off dbg_wil_off[] = {
WIL_FIELD(secure_pcp, S_IRUGO | S_IWUSR, doff_u32), WIL_FIELD(privacy, S_IRUGO, doff_u32),
WIL_FIELD(status[0], S_IRUGO | S_IWUSR, doff_ulong), WIL_FIELD(status[0], S_IRUGO | S_IWUSR, doff_ulong),
WIL_FIELD(fw_version, S_IRUGO, doff_u32), WIL_FIELD(fw_version, S_IRUGO, doff_u32),
WIL_FIELD(hw_version, S_IRUGO, doff_x32), WIL_FIELD(hw_version, S_IRUGO, doff_x32),
@ -1412,6 +1420,8 @@ static const struct dbg_off dbg_statics[] = {
{"desc_index", S_IRUGO | S_IWUSR, (ulong)&dbg_txdesc_index, doff_u32}, {"desc_index", S_IRUGO | S_IWUSR, (ulong)&dbg_txdesc_index, doff_u32},
{"vring_index", S_IRUGO | S_IWUSR, (ulong)&dbg_vring_index, doff_u32}, {"vring_index", S_IRUGO | S_IWUSR, (ulong)&dbg_vring_index, doff_u32},
{"mem_addr", S_IRUGO | S_IWUSR, (ulong)&mem_addr, doff_u32}, {"mem_addr", S_IRUGO | S_IWUSR, (ulong)&mem_addr, doff_u32},
{"vring_idle_trsh", S_IRUGO | S_IWUSR, (ulong)&vring_idle_trsh,
doff_u32},
{}, {},
}; };
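One subtle fix in this file is the guard around the per-ring idle percentage: if no cycles have elapsed since the last sample, the old unconditional do_div() would divide by zero. A tiny userspace sketch of the guarded computation (hypothetical names):

#include <stdint.h>
#include <stdio.h>

static void format_idle(char *buf, size_t sz,
			uint64_t idle_cycles, uint64_t total_cycles)
{
	if (total_cycles)
		snprintf(buf, sz, "%3d%%",
			 (int)(idle_cycles * 100 / total_cycles));
	else
		snprintf(buf, sz, "N/A");	/* nothing elapsed yet */
}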

View file

@ -50,27 +50,19 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
wil_dbg_misc(wil, "%s()\n", __func__); wil_dbg_misc(wil, "%s()\n", __func__);
if (test_bit(hw_capability_advanced_itr_moderation, tx_itr_en = ioread32(wil->csr +
wil->hw_capabilities)) { HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL));
tx_itr_en = ioread32(wil->csr + if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL)); tx_itr_val =
if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN) ioread32(wil->csr +
tx_itr_val = HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH));
ioread32(wil->csr +
HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH));
rx_itr_en = ioread32(wil->csr + rx_itr_en = ioread32(wil->csr +
HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL)); HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL));
if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN) if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN)
rx_itr_val = rx_itr_val =
ioread32(wil->csr + ioread32(wil->csr +
HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH)); HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH));
} else {
rx_itr_en = ioread32(wil->csr + HOSTADDR(RGF_DMA_ITR_CNT_CRL));
if (rx_itr_en & BIT_DMA_ITR_CNT_CRL_EN)
rx_itr_val = ioread32(wil->csr +
HOSTADDR(RGF_DMA_ITR_CNT_TRSH));
}
cp->tx_coalesce_usecs = tx_itr_val; cp->tx_coalesce_usecs = tx_itr_val;
cp->rx_coalesce_usecs = rx_itr_val; cp->rx_coalesce_usecs = rx_itr_val;

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2014 Qualcomm Atheros, Inc. * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
* *
* Permission to use, copy, modify, and/or distribute this software for any * Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above * purpose with or without fee is hereby granted, provided that the above
@ -20,6 +20,7 @@
#include "fw.h" #include "fw.h"
MODULE_FIRMWARE(WIL_FW_NAME); MODULE_FIRMWARE(WIL_FW_NAME);
MODULE_FIRMWARE(WIL_FW2_NAME);
/* target operations */ /* target operations */
/* register read */ /* register read */

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2014 Qualcomm Atheros, Inc. * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
* *
* Permission to use, copy, modify, and/or distribute this software for any * Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above * purpose with or without fee is hereby granted, provided that the above
@ -451,8 +451,6 @@ static int wil_fw_load(struct wil6210_priv *wil, const void *data, size_t size)
} }
return -EINVAL; return -EINVAL;
} }
/* Mark FW as loaded from host */
S(RGF_USER_USAGE_6, 1);
return rc; return rc;
} }

View file

@ -166,9 +166,16 @@ void wil_unmask_irq(struct wil6210_priv *wil)
/* target write operation */ /* target write operation */
#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0) #define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
static void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
void wil_configure_interrupt_moderation_new(struct wil6210_priv *wil)
{ {
wil_dbg_irq(wil, "%s()\n", __func__);
/* disable interrupt moderation for monitor
* to get better timestamp precision
*/
if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR)
return;
/* Disable and clear tx counter before (re)configuration */ /* Disable and clear tx counter before (re)configuration */
W(RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR); W(RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR);
W(RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration); W(RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration);
@ -206,42 +213,8 @@ void wil_configure_interrupt_moderation_new(struct wil6210_priv *wil)
BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL); BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL);
} }
static
void wil_configure_interrupt_moderation_lgc(struct wil6210_priv *wil)
{
/* disable, use usec resolution */
W(RGF_DMA_ITR_CNT_CRL, BIT_DMA_ITR_CNT_CRL_CLR);
wil_info(wil, "set ITR_TRSH = %d usec\n", wil->rx_max_burst_duration);
W(RGF_DMA_ITR_CNT_TRSH, wil->rx_max_burst_duration);
/* start it */
W(RGF_DMA_ITR_CNT_CRL,
BIT_DMA_ITR_CNT_CRL_EN | BIT_DMA_ITR_CNT_CRL_EXT_TICK);
}
#undef W #undef W
void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
{
wil_dbg_irq(wil, "%s()\n", __func__);
/* disable interrupt moderation for monitor
* to get better timestamp precision
*/
if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR)
return;
if (test_bit(hw_capability_advanced_itr_moderation,
wil->hw_capabilities))
wil_configure_interrupt_moderation_new(wil);
else {
/* Advanced interrupt moderation is not available before
* Sparrow v2. Will use legacy interrupt moderation
*/
wil_configure_interrupt_moderation_lgc(wil);
}
}
static irqreturn_t wil6210_irq_rx(int irq, void *cookie) static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
{ {
struct wil6210_priv *wil = cookie; struct wil6210_priv *wil = cookie;
@ -253,7 +226,7 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
trace_wil6210_irq_rx(isr); trace_wil6210_irq_rx(isr);
wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr); wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
if (!isr) { if (unlikely(!isr)) {
wil_err(wil, "spurious IRQ: RX\n"); wil_err(wil, "spurious IRQ: RX\n");
return IRQ_NONE; return IRQ_NONE;
} }
@ -266,17 +239,18 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
* action is always the same - should empty the accumulated * action is always the same - should empty the accumulated
* packets from the RX ring. * packets from the RX ring.
*/ */
if (isr & (BIT_DMA_EP_RX_ICR_RX_DONE | BIT_DMA_EP_RX_ICR_RX_HTRSH)) { if (likely(isr & (BIT_DMA_EP_RX_ICR_RX_DONE |
BIT_DMA_EP_RX_ICR_RX_HTRSH))) {
wil_dbg_irq(wil, "RX done\n"); wil_dbg_irq(wil, "RX done\n");
if (isr & BIT_DMA_EP_RX_ICR_RX_HTRSH) if (unlikely(isr & BIT_DMA_EP_RX_ICR_RX_HTRSH))
wil_err_ratelimited(wil, wil_err_ratelimited(wil,
"Received \"Rx buffer is in risk of overflow\" interrupt\n"); "Received \"Rx buffer is in risk of overflow\" interrupt\n");
isr &= ~(BIT_DMA_EP_RX_ICR_RX_DONE | isr &= ~(BIT_DMA_EP_RX_ICR_RX_DONE |
BIT_DMA_EP_RX_ICR_RX_HTRSH); BIT_DMA_EP_RX_ICR_RX_HTRSH);
if (test_bit(wil_status_reset_done, wil->status)) { if (likely(test_bit(wil_status_reset_done, wil->status))) {
if (test_bit(wil_status_napi_en, wil->status)) { if (likely(test_bit(wil_status_napi_en, wil->status))) {
wil_dbg_txrx(wil, "NAPI(Rx) schedule\n"); wil_dbg_txrx(wil, "NAPI(Rx) schedule\n");
need_unmask = false; need_unmask = false;
napi_schedule(&wil->napi_rx); napi_schedule(&wil->napi_rx);
@ -289,7 +263,7 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
} }
} }
if (isr) if (unlikely(isr))
wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr); wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr);
/* Rx IRQ will be enabled when NAPI processing finished */ /* Rx IRQ will be enabled when NAPI processing finished */
@ -313,19 +287,19 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
trace_wil6210_irq_tx(isr); trace_wil6210_irq_tx(isr);
wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr); wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
if (!isr) { if (unlikely(!isr)) {
wil_err(wil, "spurious IRQ: TX\n"); wil_err(wil, "spurious IRQ: TX\n");
return IRQ_NONE; return IRQ_NONE;
} }
wil6210_mask_irq_tx(wil); wil6210_mask_irq_tx(wil);
if (isr & BIT_DMA_EP_TX_ICR_TX_DONE) { if (likely(isr & BIT_DMA_EP_TX_ICR_TX_DONE)) {
wil_dbg_irq(wil, "TX done\n"); wil_dbg_irq(wil, "TX done\n");
isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE; isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
/* clear also all VRING interrupts */ /* clear also all VRING interrupts */
isr &= ~(BIT(25) - 1UL); isr &= ~(BIT(25) - 1UL);
if (test_bit(wil_status_reset_done, wil->status)) { if (likely(test_bit(wil_status_reset_done, wil->status))) {
wil_dbg_txrx(wil, "NAPI(Tx) schedule\n"); wil_dbg_txrx(wil, "NAPI(Tx) schedule\n");
need_unmask = false; need_unmask = false;
napi_schedule(&wil->napi_tx); napi_schedule(&wil->napi_tx);
@ -334,7 +308,7 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
} }
} }
if (isr) if (unlikely(isr))
wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr); wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr);
/* Tx IRQ will be enabled when NAPI processing finished */ /* Tx IRQ will be enabled when NAPI processing finished */
@ -523,11 +497,11 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
/** /**
* pseudo_cause is Clear-On-Read, no need to ACK * pseudo_cause is Clear-On-Read, no need to ACK
*/ */
if ((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff)) if (unlikely((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff)))
return IRQ_NONE; return IRQ_NONE;
/* FIXME: IRQ mask debug */ /* FIXME: IRQ mask debug */
if (wil6210_debug_irq_mask(wil, pseudo_cause)) if (unlikely(wil6210_debug_irq_mask(wil, pseudo_cause)))
return IRQ_NONE; return IRQ_NONE;
trace_wil6210_irq_pseudo(pseudo_cause); trace_wil6210_irq_pseudo(pseudo_cause);
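Most of the remaining churn in this interrupt path is likely()/unlikely() annotation on the hot path. For readers outside the kernel tree these are just branch-prediction hints; they expand to roughly the following (shown for context, matching the kernel's own definition):

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* usage: tell the compiler the error path is cold, e.g.
 *	if (unlikely(!isr))
 *		return IRQ_NONE;
 */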

View file

@ -29,10 +29,6 @@ bool no_fw_recovery;
module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR); module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery"); MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery");
static bool no_fw_load = true;
module_param(no_fw_load, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(no_fw_load, " do not download FW, use one in on-card flash.");
/* if not set via modparam, will be set to default value of 1/8 of /* if not set via modparam, will be set to default value of 1/8 of
* rx ring size during init flow * rx ring size during init flow
*/ */
@ -520,8 +516,6 @@ static int wil_target_reset(struct wil6210_priv *wil)
{ {
int delay = 0; int delay = 0;
u32 x; u32 x;
bool is_reset_v2 = test_bit(hw_capability_reset_v2,
wil->hw_capabilities);
wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name); wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
@ -532,82 +526,67 @@ static int wil_target_reset(struct wil6210_priv *wil)
wil_halt_cpu(wil); wil_halt_cpu(wil);
/* clear all boot loader "ready" bits */
W(RGF_USER_BL + offsetof(struct RGF_BL, ready), 0);
/* Clear Fw Download notification */ /* Clear Fw Download notification */
C(RGF_USER_USAGE_6, BIT(0)); C(RGF_USER_USAGE_6, BIT(0));
if (is_reset_v2) { S(RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN);
S(RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN); /* XTAL stabilization should take about 3ms */
/* XTAL stabilization should take about 3ms */ usleep_range(5000, 7000);
usleep_range(5000, 7000); x = R(RGF_CAF_PLL_LOCK_STATUS);
x = R(RGF_CAF_PLL_LOCK_STATUS); if (!(x & BIT_CAF_OSC_DIG_XTAL_STABLE)) {
if (!(x & BIT_CAF_OSC_DIG_XTAL_STABLE)) { wil_err(wil, "Xtal stabilization timeout\n"
wil_err(wil, "Xtal stabilization timeout\n" "RGF_CAF_PLL_LOCK_STATUS = 0x%08x\n", x);
"RGF_CAF_PLL_LOCK_STATUS = 0x%08x\n", x); return -ETIME;
return -ETIME;
}
/* switch 10k to XTAL*/
C(RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF);
/* 40 MHz */
C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL);
W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
} }
/* switch 10k to XTAL*/
C(RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF);
/* 40 MHz */
C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL);
W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000); W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F); W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
is_reset_v2 ? 0x000000f0 : 0x00000170);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00); W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00);
if (is_reset_v2) { W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0); W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
}
W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0); W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0); W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0); W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0); W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
if (is_reset_v2) { W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003); W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000); /* reset A2 PCIE AHB */
/* reset A2 PCIE AHB */
W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000);
} else {
W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000001);
W(RGF_PCIE_LOS_COUNTER_CTL, BIT(6) | BIT(8));
W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000);
}
/* TODO: check order here!!! Erez code is different */
W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0); W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
/* wait until device ready. typical time is 200..250 msec */ /* wait until device ready. typical time is 20..80 msec */
do { do {
msleep(RST_DELAY); msleep(RST_DELAY);
x = R(RGF_USER_HW_MACHINE_STATE); x = R(RGF_USER_BL + offsetof(struct RGF_BL, ready));
if (delay++ > RST_COUNT) { if (delay++ > RST_COUNT) {
wil_err(wil, "Reset not completed, hw_state 0x%08x\n", wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
x); x);
return -ETIME; return -ETIME;
} }
} while (x != HW_MACHINE_BOOT_DONE); } while (!(x & BIT_BL_READY));
if (!is_reset_v2)
W(RGF_PCIE_LOS_COUNTER_CTL, BIT(8));
C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD); C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
/* enable fix for HW bug related to the SA/DA swap in AP Rx */
S(RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY); wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY);
return 0; return 0;
} }
#undef R
#undef W
#undef S
#undef C
void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r) void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
{ {
le32_to_cpus(&r->base); le32_to_cpus(&r->base);
@ -617,6 +596,32 @@ void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
le32_to_cpus(&r->head); le32_to_cpus(&r->head);
} }
static int wil_get_bl_info(struct wil6210_priv *wil)
{
struct net_device *ndev = wil_to_ndev(wil);
struct RGF_BL bl;
wil_memcpy_fromio_32(&bl, wil->csr + HOSTADDR(RGF_USER_BL), sizeof(bl));
le32_to_cpus(&bl.ready);
le32_to_cpus(&bl.version);
le32_to_cpus(&bl.rf_type);
le32_to_cpus(&bl.baseband_type);
if (!is_valid_ether_addr(bl.mac_address)) {
wil_err(wil, "BL: Invalid MAC %pM\n", bl.mac_address);
return -EINVAL;
}
ether_addr_copy(ndev->perm_addr, bl.mac_address);
if (!is_valid_ether_addr(ndev->dev_addr))
ether_addr_copy(ndev->dev_addr, bl.mac_address);
wil_info(wil,
"Boot Loader: ver = %d MAC = %pM RF = 0x%08x bband = 0x%08x\n",
bl.version, bl.mac_address, bl.rf_type, bl.baseband_type);
return 0;
}
static int wil_wait_for_fw_ready(struct wil6210_priv *wil) static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
{ {
ulong to = msecs_to_jiffies(1000); ulong to = msecs_to_jiffies(1000);
@ -637,7 +642,7 @@ static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
* After calling this routine, you're expected to reload * After calling this routine, you're expected to reload
* the firmware. * the firmware.
*/ */
int wil_reset(struct wil6210_priv *wil) int wil_reset(struct wil6210_priv *wil, bool load_fw)
{ {
int rc; int rc;
@ -675,30 +680,36 @@ int wil_reset(struct wil6210_priv *wil)
if (rc) if (rc)
return rc; return rc;
if (!no_fw_load) { rc = wil_get_bl_info(wil);
wil_info(wil, "Use firmware <%s>\n", WIL_FW_NAME); if (rc)
return rc;
if (load_fw) {
wil_info(wil, "Use firmware <%s> + board <%s>\n", WIL_FW_NAME,
WIL_FW2_NAME);
wil_halt_cpu(wil); wil_halt_cpu(wil);
/* Loading f/w from the file */ /* Loading f/w from the file */
rc = wil_request_firmware(wil, WIL_FW_NAME); rc = wil_request_firmware(wil, WIL_FW_NAME);
if (rc)
return rc;
rc = wil_request_firmware(wil, WIL_FW2_NAME);
if (rc) if (rc)
return rc; return rc;
/* clear any interrupts which on-card-firmware may have set */ /* Mark FW as loaded from host */
S(RGF_USER_USAGE_6, 1);
/* clear any interrupts which on-card-firmware
* may have set
*/
wil6210_clear_irq(wil); wil6210_clear_irq(wil);
{ /* CAF_ICR - clear and mask */ /* CAF_ICR - clear and mask */
u32 a = HOSTADDR(RGF_CAF_ICR) + /* it is W1C, clear by writing back same value */
offsetof(struct RGF_ICR, ICR); S(RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
u32 m = HOSTADDR(RGF_CAF_ICR) + W(RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
offsetof(struct RGF_ICR, IMV);
u32 icr = ioread32(wil->csr + a);
iowrite32(icr, wil->csr + a); /* W1C */
iowrite32(~0, wil->csr + m);
wmb(); /* wait for completion */
}
wil_release_cpu(wil); wil_release_cpu(wil);
} else {
wil_info(wil, "Use firmware from on-card flash\n");
} }
/* init after reset */ /* init after reset */
@ -706,15 +717,22 @@ int wil_reset(struct wil6210_priv *wil)
reinit_completion(&wil->wmi_ready); reinit_completion(&wil->wmi_ready);
reinit_completion(&wil->wmi_call); reinit_completion(&wil->wmi_call);
wil_configure_interrupt_moderation(wil); if (load_fw) {
wil_unmask_irq(wil); wil_configure_interrupt_moderation(wil);
wil_unmask_irq(wil);
/* we just started MAC, wait for FW ready */ /* we just started MAC, wait for FW ready */
rc = wil_wait_for_fw_ready(wil); rc = wil_wait_for_fw_ready(wil);
}
return rc; return rc;
} }
#undef R
#undef W
#undef S
#undef C
void wil_fw_error_recovery(struct wil6210_priv *wil) void wil_fw_error_recovery(struct wil6210_priv *wil)
{ {
wil_dbg_misc(wil, "starting fw error recovery\n"); wil_dbg_misc(wil, "starting fw error recovery\n");
@ -730,7 +748,7 @@ int __wil_up(struct wil6210_priv *wil)
WARN_ON(!mutex_is_locked(&wil->mutex)); WARN_ON(!mutex_is_locked(&wil->mutex));
rc = wil_reset(wil); rc = wil_reset(wil, true);
if (rc) if (rc)
return rc; return rc;
@ -837,7 +855,7 @@ int __wil_down(struct wil6210_priv *wil)
if (!iter) if (!iter)
wil_err(wil, "timeout waiting for idle FW/HW\n"); wil_err(wil, "timeout waiting for idle FW/HW\n");
wil_rx_fini(wil); wil_reset(wil, false);
return 0; return 0;
} }
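wil_reset() now takes a load_fw flag (true for full bring-up, false to quiesce the device on down), and the permanent MAC comes from the boot-loader block rather than the WMI ready event, so it is sanity-checked before use. A sketch of that check, assuming the usual is_valid_ether_addr() rules (reject all-zeros and any address with the multicast bit set); names here are ours:

#include <stdbool.h>
#include <stdint.h>

static bool bl_mac_is_valid(const uint8_t mac[6])
{
	bool all_zero = true;
	int i;

	for (i = 0; i < 6; i++)
		if (mac[i])
			all_zero = false;

	/* bit 0 of the first octet marks multicast/broadcast addresses */
	return !all_zero && !(mac[0] & 0x01);
}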

View file

@ -39,18 +39,6 @@ void wil_set_capabilities(struct wil6210_priv *wil)
bitmap_zero(wil->hw_capabilities, hw_capability_last); bitmap_zero(wil->hw_capabilities, hw_capability_last);
switch (rev_id) { switch (rev_id) {
case JTAG_DEV_ID_MARLON_B0:
wil->hw_name = "Marlon B0";
wil->hw_version = HW_VER_MARLON_B0;
break;
case JTAG_DEV_ID_SPARROW_A0:
wil->hw_name = "Sparrow A0";
wil->hw_version = HW_VER_SPARROW_A0;
break;
case JTAG_DEV_ID_SPARROW_A1:
wil->hw_name = "Sparrow A1";
wil->hw_version = HW_VER_SPARROW_A1;
break;
case JTAG_DEV_ID_SPARROW_B0: case JTAG_DEV_ID_SPARROW_B0:
wil->hw_name = "Sparrow B0"; wil->hw_name = "Sparrow B0";
wil->hw_version = HW_VER_SPARROW_B0; wil->hw_version = HW_VER_SPARROW_B0;
@ -62,13 +50,6 @@ void wil_set_capabilities(struct wil6210_priv *wil)
} }
wil_info(wil, "Board hardware is %s\n", wil->hw_name); wil_info(wil, "Board hardware is %s\n", wil->hw_name);
if (wil->hw_version >= HW_VER_SPARROW_A0)
set_bit(hw_capability_reset_v2, wil->hw_capabilities);
if (wil->hw_version >= HW_VER_SPARROW_B0)
set_bit(hw_capability_advanced_itr_moderation,
wil->hw_capabilities);
} }
void wil_disable_irq(struct wil6210_priv *wil) void wil_disable_irq(struct wil6210_priv *wil)
@ -150,7 +131,7 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
/* need reset here to obtain MAC */ /* need reset here to obtain MAC */
mutex_lock(&wil->mutex); mutex_lock(&wil->mutex);
rc = wil_reset(wil); rc = wil_reset(wil, false);
mutex_unlock(&wil->mutex); mutex_unlock(&wil->mutex);
if (debug_fw) if (debug_fw)
rc = 0; rc = 0;
@ -305,7 +286,6 @@ static void wil_pcie_remove(struct pci_dev *pdev)
} }
static const struct pci_device_id wil6210_pcie_ids[] = { static const struct pci_device_id wil6210_pcie_ids[] = {
{ PCI_DEVICE(0x1ae9, 0x0301) },
{ PCI_DEVICE(0x1ae9, 0x0310) }, { PCI_DEVICE(0x1ae9, 0x0310) },
{ PCI_DEVICE(0x1ae9, 0x0302) }, /* same as above, firmware broken */ { PCI_DEVICE(0x1ae9, 0x0302) }, /* same as above, firmware broken */
{ /* end: all zeroes */ }, { /* end: all zeroes */ },

View file

@ -53,34 +53,38 @@ static inline int wil_vring_is_full(struct vring *vring)
return wil_vring_next_tail(vring) == vring->swhead; return wil_vring_next_tail(vring) == vring->swhead;
} }
/* /* Used space in Tx Vring */
* Available space in Tx Vring static inline int wil_vring_used_tx(struct vring *vring)
*/
static inline int wil_vring_avail_tx(struct vring *vring)
{ {
u32 swhead = vring->swhead; u32 swhead = vring->swhead;
u32 swtail = vring->swtail; u32 swtail = vring->swtail;
int used = (vring->size + swhead - swtail) % vring->size; return (vring->size + swhead - swtail) % vring->size;
return vring->size - used - 1;
} }
/** /* Available space in Tx Vring */
* wil_vring_wmark_low - low watermark for available descriptor space static inline int wil_vring_avail_tx(struct vring *vring)
*/ {
return vring->size - wil_vring_used_tx(vring) - 1;
}
/* wil_vring_wmark_low - low watermark for available descriptor space */
static inline int wil_vring_wmark_low(struct vring *vring) static inline int wil_vring_wmark_low(struct vring *vring)
{ {
return vring->size/8; return vring->size/8;
} }
/** /* wil_vring_wmark_high - high watermark for available descriptor space */
* wil_vring_wmark_high - high watermark for available descriptor space
*/
static inline int wil_vring_wmark_high(struct vring *vring) static inline int wil_vring_wmark_high(struct vring *vring)
{ {
return vring->size/4; return vring->size/4;
} }
/* wil_val_in_range - check if value in [min,max) */
static inline bool wil_val_in_range(int val, int min, int max)
{
return val >= min && val < max;
}
static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring) static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
{ {
struct device *dev = wil_to_dev(wil); struct device *dev = wil_to_dev(wil);
@ -98,8 +102,7 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
vring->va = NULL; vring->va = NULL;
return -ENOMEM; return -ENOMEM;
} }
/* /* vring->va should be aligned on its size rounded up to power of 2
* vring->va should be aligned on its size rounded up to power of 2
* This is granted by the dma_alloc_coherent * This is granted by the dma_alloc_coherent
*/ */
vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL); vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
@ -346,27 +349,6 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
} }
} }
/*
* Fast swap in place between 2 registers
*/
static void wil_swap_u16(u16 *a, u16 *b)
{
*a ^= *b;
*b ^= *a;
*a ^= *b;
}
static void wil_swap_ethaddr(void *data)
{
struct ethhdr *eth = data;
u16 *s = (u16 *)eth->h_source;
u16 *d = (u16 *)eth->h_dest;
wil_swap_u16(s++, d++);
wil_swap_u16(s++, d++);
wil_swap_u16(s, d);
}
/** /**
* reap 1 frame from @swhead * reap 1 frame from @swhead
* *
@ -386,17 +368,16 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
unsigned int sz = mtu_max + ETH_HLEN; unsigned int sz = mtu_max + ETH_HLEN;
u16 dmalen; u16 dmalen;
u8 ftype; u8 ftype;
u8 ds_bits;
int cid; int cid;
struct wil_net_stats *stats; struct wil_net_stats *stats;
BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb)); BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));
if (wil_vring_is_empty(vring)) if (unlikely(wil_vring_is_empty(vring)))
return NULL; return NULL;
_d = &vring->va[vring->swhead].rx; _d = &vring->va[vring->swhead].rx;
if (!(_d->dma.status & RX_DMA_STATUS_DU)) { if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
/* it is not error, we just reached end of Rx done area */ /* it is not error, we just reached end of Rx done area */
return NULL; return NULL;
} }
@ -416,7 +397,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4, wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
(const void *)d, sizeof(*d), false); (const void *)d, sizeof(*d), false);
if (dmalen > sz) { if (unlikely(dmalen > sz)) {
wil_err(wil, "Rx size too large: %d bytes!\n", dmalen); wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
kfree_skb(skb); kfree_skb(skb);
return NULL; return NULL;
@ -445,14 +426,14 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
* in Rx descriptor. If type is not data, it is 802.11 frame as is * in Rx descriptor. If type is not data, it is 802.11 frame as is
*/ */
ftype = wil_rxdesc_ftype(d) << 2; ftype = wil_rxdesc_ftype(d) << 2;
if (ftype != IEEE80211_FTYPE_DATA) { if (unlikely(ftype != IEEE80211_FTYPE_DATA)) {
wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype); wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype);
/* TODO: process it */ /* TODO: process it */
kfree_skb(skb); kfree_skb(skb);
return NULL; return NULL;
} }
if (skb->len < ETH_HLEN) { if (unlikely(skb->len < ETH_HLEN)) {
wil_err(wil, "Short frame, len = %d\n", skb->len); wil_err(wil, "Short frame, len = %d\n", skb->len);
/* TODO: process it (i.e. BAR) */ /* TODO: process it (i.e. BAR) */
kfree_skb(skb); kfree_skb(skb);
@ -463,9 +444,9 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
* and in case of error drop the packet * and in case of error drop the packet
* higher stack layers will handle retransmission (if required) * higher stack layers will handle retransmission (if required)
*/ */
if (d->dma.status & RX_DMA_STATUS_L4I) { if (likely(d->dma.status & RX_DMA_STATUS_L4I)) {
/* L4 protocol identified, csum calculated */ /* L4 protocol identified, csum calculated */
if ((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0) if (likely((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0))
skb->ip_summed = CHECKSUM_UNNECESSARY; skb->ip_summed = CHECKSUM_UNNECESSARY;
/* If HW reports bad checksum, let IP stack re-check it /* If HW reports bad checksum, let IP stack re-check it
* For example, HW don't understand Microsoft IP stack that * For example, HW don't understand Microsoft IP stack that
@ -474,15 +455,6 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
*/ */
} }
ds_bits = wil_rxdesc_ds_bits(d);
if (ds_bits == 1) {
/*
* HW bug - in ToDS mode, i.e. Rx on AP side,
* addresses get swapped
*/
wil_swap_ethaddr(skb->data);
}
return skb; return skb;
} }
@ -503,7 +475,7 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
(next_tail != v->swhead) && (count-- > 0); (next_tail != v->swhead) && (count-- > 0);
v->swtail = next_tail) { v->swtail = next_tail) {
rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom); rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
if (rc) { if (unlikely(rc)) {
wil_err(wil, "Error %d in wil_rx_refill[%d]\n", wil_err(wil, "Error %d in wil_rx_refill[%d]\n",
rc, v->swtail); rc, v->swtail);
break; break;
@ -565,7 +537,7 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota)
struct vring *v = &wil->vring_rx; struct vring *v = &wil->vring_rx;
struct sk_buff *skb; struct sk_buff *skb;
if (!v->va) { if (unlikely(!v->va)) {
wil_err(wil, "Rx IRQ while Rx not yet initialized\n"); wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
return; return;
} }
@ -952,13 +924,14 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index]; struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
uint i = swhead; uint i = swhead;
dma_addr_t pa; dma_addr_t pa;
int used;
wil_dbg_txrx(wil, "%s()\n", __func__); wil_dbg_txrx(wil, "%s()\n", __func__);
if (unlikely(!txdata->enabled)) if (unlikely(!txdata->enabled))
return -EINVAL; return -EINVAL;
if (avail < 1 + nr_frags) { if (unlikely(avail < 1 + nr_frags)) {
wil_err_ratelimited(wil, wil_err_ratelimited(wil,
"Tx ring[%2d] full. No space for %d fragments\n", "Tx ring[%2d] full. No space for %d fragments\n",
vring_index, 1 + nr_frags); vring_index, 1 + nr_frags);
@ -979,7 +952,7 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
/* 1-st segment */ /* 1-st segment */
wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index); wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index);
/* Process TCP/UDP checksum offloading */ /* Process TCP/UDP checksum offloading */
if (wil_tx_desc_offload_cksum_set(wil, d, skb)) { if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) {
wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n", wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
vring_index); vring_index);
goto dma_error; goto dma_error;
@ -1027,8 +1000,14 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
*/ */
vring->ctx[i].skb = skb_get(skb); vring->ctx[i].skb = skb_get(skb);
if (wil_vring_is_empty(vring)) /* performance monitoring */ /* performance monitoring */
used = wil_vring_used_tx(vring);
if (wil_val_in_range(vring_idle_trsh,
used, used + nr_frags + 1)) {
txdata->idle += get_cycles() - txdata->last_idle; txdata->idle += get_cycles() - txdata->last_idle;
wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
vring_index, used, used + nr_frags + 1);
}
/* advance swhead */ /* advance swhead */
wil_vring_advance_head(vring, nr_frags + 1); wil_vring_advance_head(vring, nr_frags + 1);
@ -1082,18 +1061,18 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
int rc; int rc;
wil_dbg_txrx(wil, "%s()\n", __func__); wil_dbg_txrx(wil, "%s()\n", __func__);
if (!test_bit(wil_status_fwready, wil->status)) { if (unlikely(!test_bit(wil_status_fwready, wil->status))) {
if (!pr_once_fw) { if (!pr_once_fw) {
wil_err(wil, "FW not ready\n"); wil_err(wil, "FW not ready\n");
pr_once_fw = true; pr_once_fw = true;
} }
goto drop; goto drop;
} }
if (!test_bit(wil_status_fwconnected, wil->status)) { if (unlikely(!test_bit(wil_status_fwconnected, wil->status))) {
wil_err(wil, "FW not connected\n"); wil_err(wil, "FW not connected\n");
goto drop; goto drop;
} }
if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) { if (unlikely(wil->wdev->iftype == NL80211_IFTYPE_MONITOR)) {
wil_err(wil, "Xmit in monitor mode not supported\n"); wil_err(wil, "Xmit in monitor mode not supported\n");
goto drop; goto drop;
} }
@ -1109,7 +1088,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
else else
vring = wil_tx_bcast(wil, skb); vring = wil_tx_bcast(wil, skb);
} }
if (!vring) { if (unlikely(!vring)) {
wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest); wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
goto drop; goto drop;
} }
@ -1117,7 +1096,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
rc = wil_tx_vring(wil, vring, skb); rc = wil_tx_vring(wil, vring, skb);
/* do we still have enough room in the vring? */ /* do we still have enough room in the vring? */
if (wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring)) { if (unlikely(wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring))) {
netif_tx_stop_all_queues(wil_to_ndev(wil)); netif_tx_stop_all_queues(wil_to_ndev(wil));
wil_dbg_txrx(wil, "netif_tx_stop : ring full\n"); wil_dbg_txrx(wil, "netif_tx_stop : ring full\n");
} }
@ -1172,19 +1151,23 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
int cid = wil->vring2cid_tid[ringid][0]; int cid = wil->vring2cid_tid[ringid][0];
struct wil_net_stats *stats = &wil->sta[cid].stats; struct wil_net_stats *stats = &wil->sta[cid].stats;
volatile struct vring_tx_desc *_d; volatile struct vring_tx_desc *_d;
int used_before_complete;
int used_new;
if (!vring->va) { if (unlikely(!vring->va)) {
wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid); wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
return 0; return 0;
} }
if (!txdata->enabled) { if (unlikely(!txdata->enabled)) {
wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid); wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
return 0; return 0;
} }
wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid); wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);
used_before_complete = wil_vring_used_tx(vring);
while (!wil_vring_is_empty(vring)) { while (!wil_vring_is_empty(vring)) {
int new_swtail; int new_swtail;
struct wil_ctx *ctx = &vring->ctx[vring->swtail]; struct wil_ctx *ctx = &vring->ctx[vring->swtail];
@ -1196,7 +1179,7 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
/* TODO: check we are not past head */ /* TODO: check we are not past head */
_d = &vring->va[lf].tx; _d = &vring->va[lf].tx;
if (!(_d->dma.status & TX_DMA_STATUS_DU)) if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
break; break;
new_swtail = (lf + 1) % vring->size; new_swtail = (lf + 1) % vring->size;
@ -1224,7 +1207,7 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
wil_txdesc_unmap(dev, d, ctx); wil_txdesc_unmap(dev, d, ctx);
if (skb) { if (skb) {
if (d->dma.error == 0) { if (likely(d->dma.error == 0)) {
ndev->stats.tx_packets++; ndev->stats.tx_packets++;
stats->tx_packets++; stats->tx_packets++;
ndev->stats.tx_bytes += skb->len; ndev->stats.tx_bytes += skb->len;
@ -1246,8 +1229,12 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
} }
} }
if (wil_vring_is_empty(vring)) { /* performance monitoring */ /* performance monitoring */
wil_dbg_txrx(wil, "Ring[%2d] empty\n", ringid); used_new = wil_vring_used_tx(vring);
if (wil_val_in_range(vring_idle_trsh,
used_new, used_before_complete)) {
wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
ringid, used_before_complete, used_new);
txdata->last_idle = get_cycles(); txdata->last_idle = get_cycles();
} }
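The Tx path in this file now tracks ring occupancy instead of only "empty": wil_vring_used_tx() counts descriptors between swtail and swhead, the available count keeps one slot unused, and wil_val_in_range() detects when a submit or completion crosses the vring_idle_trsh watermark. A standalone sketch of that arithmetic (hypothetical names):

#include <stdbool.h>
#include <stdio.h>

struct ring { unsigned size, swhead, swtail; };

static unsigned ring_used(const struct ring *r)
{
	return (r->size + r->swhead - r->swtail) % r->size;
}

static unsigned ring_avail(const struct ring *r)
{
	return r->size - ring_used(r) - 1;	/* one slot always left empty */
}

static bool val_in_range(unsigned val, unsigned lo, unsigned hi)
{
	return val >= lo && val < hi;		/* [lo, hi) */
}

int main(void)
{
	struct ring r = { .size = 128, .swhead = 10, .swtail = 120 };
	unsigned before = ring_used(&r);	/* 18 used, 109 available */

	printf("used=%u avail=%u\n", before, ring_avail(&r));
	/* a completion that drops "used" from 18 to 12 crosses the 16-descriptor threshold */
	printf("went idle: %d\n", val_in_range(16, 12, before));
	return 0;
}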

View file

@ -27,9 +27,11 @@ extern bool no_fw_recovery;
extern unsigned int mtu_max; extern unsigned int mtu_max;
extern unsigned short rx_ring_overflow_thrsh; extern unsigned short rx_ring_overflow_thrsh;
extern int agg_wsize; extern int agg_wsize;
extern u32 vring_idle_trsh;
#define WIL_NAME "wil6210" #define WIL_NAME "wil6210"
#define WIL_FW_NAME "wil6210.fw" #define WIL_FW_NAME "wil6210.fw" /* code */
#define WIL_FW2_NAME "wil6210.board" /* board & radio parameters */
#define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */ #define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */
@ -120,6 +122,16 @@ struct RGF_ICR {
u32 IMC; /* Mask Clear, write 1 to clear */ u32 IMC; /* Mask Clear, write 1 to clear */
} __packed; } __packed;
struct RGF_BL {
u32 ready; /* 0x880A3C bit [0] */
#define BIT_BL_READY BIT(0)
u32 version; /* 0x880A40 version of the BL struct */
u32 rf_type; /* 0x880A44 ID of the connected RF */
u32 baseband_type; /* 0x880A48 ID of the baseband */
u8 mac_address[ETH_ALEN]; /* 0x880A4C permanent MAC */
u8 pad[2];
} __packed;
/* registers - FW addresses */ /* registers - FW addresses */
#define RGF_USER_USAGE_1 (0x880004) #define RGF_USER_USAGE_1 (0x880004)
#define RGF_USER_USAGE_6 (0x880018) #define RGF_USER_USAGE_6 (0x880018)
@ -130,6 +142,7 @@ struct RGF_ICR {
#define RGF_USER_MAC_CPU_0 (0x8801fc) #define RGF_USER_MAC_CPU_0 (0x8801fc)
#define BIT_USER_MAC_CPU_MAN_RST BIT(1) /* mac_cpu_man_rst */ #define BIT_USER_MAC_CPU_MAN_RST BIT(1) /* mac_cpu_man_rst */
#define RGF_USER_USER_SCRATCH_PAD (0x8802bc) #define RGF_USER_USER_SCRATCH_PAD (0x8802bc)
#define RGF_USER_BL (0x880A3C) /* Boot Loader */
#define RGF_USER_FW_REV_ID (0x880a8c) /* chip revision */ #define RGF_USER_FW_REV_ID (0x880a8c) /* chip revision */
#define RGF_USER_CLKS_CTL_0 (0x880abc) #define RGF_USER_CLKS_CTL_0 (0x880abc)
#define BIT_USER_CLKS_CAR_AHB_SW_SEL BIT(1) /* ref clk/PLL */ #define BIT_USER_CLKS_CAR_AHB_SW_SEL BIT(1) /* ref clk/PLL */
@ -169,6 +182,13 @@ struct RGF_ICR {
#define BIT_DMA_ITR_CNT_CRL_CLR BIT(3) #define BIT_DMA_ITR_CNT_CRL_CLR BIT(3)
#define BIT_DMA_ITR_CNT_CRL_REACH_TRSH BIT(4) #define BIT_DMA_ITR_CNT_CRL_REACH_TRSH BIT(4)
/* Offload control (Sparrow B0+) */
#define RGF_DMA_OFUL_NID_0 (0x881cd4)
#define BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN BIT(0)
#define BIT_DMA_OFUL_NID_0_TX_EXT_TR_EN BIT(1)
#define BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC BIT(2)
#define BIT_DMA_OFUL_NID_0_TX_EXT_A3_SRC BIT(3)
/* New (sparrow v2+) interrupt moderation control */ /* New (sparrow v2+) interrupt moderation control */
#define RGF_DMA_ITR_TX_DESQ_NO_MOD (0x881d40) #define RGF_DMA_ITR_TX_DESQ_NO_MOD (0x881d40)
#define RGF_DMA_ITR_TX_CNT_TRSH (0x881d34) #define RGF_DMA_ITR_TX_CNT_TRSH (0x881d34)
@ -229,16 +249,10 @@ struct RGF_ICR {
#define BIT_CAF_OSC_DIG_XTAL_STABLE BIT(0) #define BIT_CAF_OSC_DIG_XTAL_STABLE BIT(0)
#define RGF_USER_JTAG_DEV_ID (0x880b34) /* device ID */ #define RGF_USER_JTAG_DEV_ID (0x880b34) /* device ID */
#define JTAG_DEV_ID_MARLON_B0 (0x0612072f)
#define JTAG_DEV_ID_SPARROW_A0 (0x0632072f)
#define JTAG_DEV_ID_SPARROW_A1 (0x1632072f)
#define JTAG_DEV_ID_SPARROW_B0 (0x2632072f) #define JTAG_DEV_ID_SPARROW_B0 (0x2632072f)
enum { enum {
HW_VER_UNKNOWN, HW_VER_UNKNOWN,
HW_VER_MARLON_B0, /* JTAG_DEV_ID_MARLON_B0 */
HW_VER_SPARROW_A0, /* JTAG_DEV_ID_SPARROW_A0 */
HW_VER_SPARROW_A1, /* JTAG_DEV_ID_SPARROW_A1 */
HW_VER_SPARROW_B0, /* JTAG_DEV_ID_SPARROW_B0 */ HW_VER_SPARROW_B0, /* JTAG_DEV_ID_SPARROW_B0 */
}; };
@ -482,8 +496,6 @@ enum {
}; };
enum { enum {
hw_capability_reset_v2 = 0,
hw_capability_advanced_itr_moderation = 1,
hw_capability_last hw_capability_last
}; };
@ -528,7 +540,7 @@ struct wil6210_priv {
wait_queue_head_t wq; /* for all wait_event() use */ wait_queue_head_t wq; /* for all wait_event() use */
/* profile */ /* profile */
u32 monitor_flags; u32 monitor_flags;
u32 secure_pcp; /* create secure PCP? */ u32 privacy; /* secure connection? */
int sinfo_gen; int sinfo_gen;
/* interrupt moderation */ /* interrupt moderation */
u32 tx_max_burst_duration; u32 tx_max_burst_duration;
@ -658,7 +670,7 @@ int wil_if_add(struct wil6210_priv *wil);
void wil_if_remove(struct wil6210_priv *wil); void wil_if_remove(struct wil6210_priv *wil);
int wil_priv_init(struct wil6210_priv *wil); int wil_priv_init(struct wil6210_priv *wil);
void wil_priv_deinit(struct wil6210_priv *wil); void wil_priv_deinit(struct wil6210_priv *wil);
int wil_reset(struct wil6210_priv *wil); int wil_reset(struct wil6210_priv *wil, bool no_fw);
void wil_fw_error_recovery(struct wil6210_priv *wil); void wil_fw_error_recovery(struct wil6210_priv *wil);
void wil_set_recovery_state(struct wil6210_priv *wil, int state); void wil_set_recovery_state(struct wil6210_priv *wil, int state);
int wil_up(struct wil6210_priv *wil); int wil_up(struct wil6210_priv *wil);

View file

@ -281,7 +281,6 @@ int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
/*=== Event handlers ===*/ /*=== Event handlers ===*/
static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len) static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
{ {
struct net_device *ndev = wil_to_ndev(wil);
struct wireless_dev *wdev = wil->wdev; struct wireless_dev *wdev = wil->wdev;
struct wmi_ready_event *evt = d; struct wmi_ready_event *evt = d;
@ -290,11 +289,7 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
wil_info(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version, wil_info(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version,
evt->mac, wil->n_mids); evt->mac, wil->n_mids);
/* ignore MAC address, we already have it from the boot loader */
if (!is_valid_ether_addr(ndev->dev_addr)) {
memcpy(ndev->dev_addr, evt->mac, ETH_ALEN);
memcpy(ndev->perm_addr, evt->mac, ETH_ALEN);
}
snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version), snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version),
"%d", wil->fw_version); "%d", wil->fw_version);
} }
@ -879,7 +874,7 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan)
struct wmi_pcp_started_event evt; struct wmi_pcp_started_event evt;
} __packed reply; } __packed reply;
if (!wil->secure_pcp) if (!wil->privacy)
cmd.disable_sec = 1; cmd.disable_sec = 1;
if ((cmd.pcp_max_assoc_sta > WIL6210_MAX_CID) || if ((cmd.pcp_max_assoc_sta > WIL6210_MAX_CID) ||

View file

@ -4819,7 +4819,7 @@ static void b43_wireless_core_exit(struct b43_wldev *dev)
switch (dev->dev->bus_type) { switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA #ifdef CONFIG_B43_BCMA
case B43_BUS_BCMA: case B43_BUS_BCMA:
bcma_core_pci_down(dev->dev->bdev->bus); bcma_host_pci_down(dev->dev->bdev->bus);
break; break;
#endif #endif
#ifdef CONFIG_B43_SSB #ifdef CONFIG_B43_SSB
@ -4866,9 +4866,9 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
switch (dev->dev->bus_type) { switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA #ifdef CONFIG_B43_BCMA
case B43_BUS_BCMA: case B43_BUS_BCMA:
bcma_core_pci_irq_ctl(&dev->dev->bdev->bus->drv_pci[0], bcma_core_pci_irq_ctl(dev->dev->bdev->bus,
dev->dev->bdev, true); dev->dev->bdev, true);
bcma_core_pci_up(dev->dev->bdev->bus); bcma_host_pci_up(dev->dev->bdev->bus);
break; break;
#endif #endif
#ifdef CONFIG_B43_SSB #ifdef CONFIG_B43_SSB

View file

@ -58,6 +58,14 @@
#define BRCMF_DEFAULT_TXGLOM_SIZE 32 /* max tx frames in glom chain */ #define BRCMF_DEFAULT_TXGLOM_SIZE 32 /* max tx frames in glom chain */
#define BRCMF_DEFAULT_RXGLOM_SIZE 32 /* max rx frames in glom chain */ #define BRCMF_DEFAULT_RXGLOM_SIZE 32 /* max rx frames in glom chain */
struct brcmf_sdiod_freezer {
atomic_t freezing;
atomic_t thread_count;
u32 frozen_count;
wait_queue_head_t thread_freeze;
struct completion resumed;
};
static int brcmf_sdiod_txglomsz = BRCMF_DEFAULT_TXGLOM_SIZE; static int brcmf_sdiod_txglomsz = BRCMF_DEFAULT_TXGLOM_SIZE;
module_param_named(txglomsz, brcmf_sdiod_txglomsz, int, 0); module_param_named(txglomsz, brcmf_sdiod_txglomsz, int, 0);
MODULE_PARM_DESC(txglomsz, "maximum tx packet chain size [SDIO]"); MODULE_PARM_DESC(txglomsz, "maximum tx packet chain size [SDIO]");
@ -197,6 +205,30 @@ int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
return 0; return 0;
} }
void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
enum brcmf_sdiod_state state)
{
if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM ||
state == sdiodev->state)
return;
brcmf_dbg(TRACE, "%d -> %d\n", sdiodev->state, state);
switch (sdiodev->state) {
case BRCMF_SDIOD_DATA:
/* any other state means bus interface is down */
brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
break;
case BRCMF_SDIOD_DOWN:
/* transition from DOWN to DATA means bus interface is up */
if (state == BRCMF_SDIOD_DATA)
brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_UP);
break;
default:
break;
}
sdiodev->state = state;
}
static inline int brcmf_sdiod_f0_writeb(struct sdio_func *func, static inline int brcmf_sdiod_f0_writeb(struct sdio_func *func,
uint regaddr, u8 byte) uint regaddr, u8 byte)
{ {
@ -269,12 +301,6 @@ static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
return ret; return ret;
} }
static void brcmf_sdiod_nomedium_state(struct brcmf_sdio_dev *sdiodev)
{
sdiodev->state = BRCMF_STATE_NOMEDIUM;
brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
}
static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr, static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
u8 regsz, void *data, bool write) u8 regsz, void *data, bool write)
{ {
@ -282,7 +308,7 @@ static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
s32 retry = 0; s32 retry = 0;
int ret; int ret;
if (sdiodev->state == BRCMF_STATE_NOMEDIUM) if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM)
return -ENOMEDIUM; return -ENOMEDIUM;
/* /*
@ -308,7 +334,7 @@ static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
retry++ < SDIOH_API_ACCESS_RETRY_LIMIT); retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
if (ret == -ENOMEDIUM) if (ret == -ENOMEDIUM)
brcmf_sdiod_nomedium_state(sdiodev); brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
else if (ret != 0) { else if (ret != 0) {
/* /*
* SleepCSR register access can fail when * SleepCSR register access can fail when
@ -331,7 +357,7 @@ brcmf_sdiod_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
int err = 0, i; int err = 0, i;
u8 addr[3]; u8 addr[3];
if (sdiodev->state == BRCMF_STATE_NOMEDIUM) if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM)
return -ENOMEDIUM; return -ENOMEDIUM;
addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK; addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK;
@ -460,7 +486,7 @@ static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
err = sdio_readsb(sdiodev->func[fn], ((u8 *)(pkt->data)), addr, err = sdio_readsb(sdiodev->func[fn], ((u8 *)(pkt->data)), addr,
req_sz); req_sz);
if (err == -ENOMEDIUM) if (err == -ENOMEDIUM)
brcmf_sdiod_nomedium_state(sdiodev); brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
return err; return err;
} }
@ -595,7 +621,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error; ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
if (ret == -ENOMEDIUM) { if (ret == -ENOMEDIUM) {
brcmf_sdiod_nomedium_state(sdiodev); brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
break; break;
} else if (ret != 0) { } else if (ret != 0) {
brcmf_err("CMD53 sg block %s failed %d\n", brcmf_err("CMD53 sg block %s failed %d\n",
@ -877,6 +903,87 @@ static void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
sdiodev->txglomsz = brcmf_sdiod_txglomsz; sdiodev->txglomsz = brcmf_sdiod_txglomsz;
} }
#ifdef CONFIG_PM_SLEEP
static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
{
sdiodev->freezer = kzalloc(sizeof(*sdiodev->freezer), GFP_KERNEL);
if (!sdiodev->freezer)
return -ENOMEM;
atomic_set(&sdiodev->freezer->thread_count, 0);
atomic_set(&sdiodev->freezer->freezing, 0);
init_waitqueue_head(&sdiodev->freezer->thread_freeze);
init_completion(&sdiodev->freezer->resumed);
return 0;
}
static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
{
if (sdiodev->freezer) {
WARN_ON(atomic_read(&sdiodev->freezer->freezing));
kfree(sdiodev->freezer);
}
}
static int brcmf_sdiod_freezer_on(struct brcmf_sdio_dev *sdiodev)
{
atomic_t *expect = &sdiodev->freezer->thread_count;
int res = 0;
sdiodev->freezer->frozen_count = 0;
reinit_completion(&sdiodev->freezer->resumed);
atomic_set(&sdiodev->freezer->freezing, 1);
brcmf_sdio_trigger_dpc(sdiodev->bus);
wait_event(sdiodev->freezer->thread_freeze,
atomic_read(expect) == sdiodev->freezer->frozen_count);
sdio_claim_host(sdiodev->func[1]);
res = brcmf_sdio_sleep(sdiodev->bus, true);
sdio_release_host(sdiodev->func[1]);
return res;
}
static void brcmf_sdiod_freezer_off(struct brcmf_sdio_dev *sdiodev)
{
sdio_claim_host(sdiodev->func[1]);
brcmf_sdio_sleep(sdiodev->bus, false);
sdio_release_host(sdiodev->func[1]);
atomic_set(&sdiodev->freezer->freezing, 0);
complete_all(&sdiodev->freezer->resumed);
}
bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev)
{
return atomic_read(&sdiodev->freezer->freezing);
}
void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
{
if (!brcmf_sdiod_freezing(sdiodev))
return;
sdiodev->freezer->frozen_count++;
wake_up(&sdiodev->freezer->thread_freeze);
wait_for_completion(&sdiodev->freezer->resumed);
}
void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev)
{
atomic_inc(&sdiodev->freezer->thread_count);
}
void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev)
{
atomic_dec(&sdiodev->freezer->thread_count);
}
#else
static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
{
return 0;
}
static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
{
}
#endif /* CONFIG_PM_SLEEP */
static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev) static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
{ {
if (sdiodev->bus) { if (sdiodev->bus) {
@ -884,6 +991,8 @@ static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
sdiodev->bus = NULL; sdiodev->bus = NULL;
} }
brcmf_sdiod_freezer_detach(sdiodev);
/* Disable Function 2 */ /* Disable Function 2 */
sdio_claim_host(sdiodev->func[2]); sdio_claim_host(sdiodev->func[2]);
sdio_disable_func(sdiodev->func[2]); sdio_disable_func(sdiodev->func[2]);
@ -955,6 +1064,10 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
*/ */
brcmf_sdiod_sgtable_alloc(sdiodev); brcmf_sdiod_sgtable_alloc(sdiodev);
ret = brcmf_sdiod_freezer_attach(sdiodev);
if (ret)
goto out;
/* try to attach to the target device */ /* try to attach to the target device */
sdiodev->bus = brcmf_sdio_probe(sdiodev); sdiodev->bus = brcmf_sdio_probe(sdiodev);
if (!sdiodev->bus) { if (!sdiodev->bus) {
@ -1050,9 +1163,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
bus_if->wowl_supported = true; bus_if->wowl_supported = true;
#endif #endif
sdiodev->sleeping = false; brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_DOWN);
atomic_set(&sdiodev->suspend, false);
init_waitqueue_head(&sdiodev->idle_wait);
brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n"); brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n");
err = brcmf_sdiod_probe(sdiodev); err = brcmf_sdiod_probe(sdiodev);
@ -1114,24 +1225,22 @@ void brcmf_sdio_wowl_config(struct device *dev, bool enabled)
#ifdef CONFIG_PM_SLEEP #ifdef CONFIG_PM_SLEEP
static int brcmf_ops_sdio_suspend(struct device *dev) static int brcmf_ops_sdio_suspend(struct device *dev)
{ {
struct sdio_func *func;
struct brcmf_bus *bus_if; struct brcmf_bus *bus_if;
struct brcmf_sdio_dev *sdiodev; struct brcmf_sdio_dev *sdiodev;
mmc_pm_flag_t sdio_flags; mmc_pm_flag_t sdio_flags;
brcmf_dbg(SDIO, "Enter\n"); func = container_of(dev, struct sdio_func, dev);
brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
if (func->num != SDIO_FUNC_1)
return 0;
bus_if = dev_get_drvdata(dev); bus_if = dev_get_drvdata(dev);
sdiodev = bus_if->bus_priv.sdio; sdiodev = bus_if->bus_priv.sdio;
/* wait for watchdog to go idle */ brcmf_sdiod_freezer_on(sdiodev);
if (wait_event_timeout(sdiodev->idle_wait, sdiodev->sleeping,
msecs_to_jiffies(3 * BRCMF_WD_POLL_MS)) == 0) {
brcmf_err("bus still active\n");
return -EBUSY;
}
/* disable watchdog */
brcmf_sdio_wd_timer(sdiodev->bus, 0); brcmf_sdio_wd_timer(sdiodev->bus, 0);
atomic_set(&sdiodev->suspend, true);
if (sdiodev->wowl_enabled) { if (sdiodev->wowl_enabled) {
sdio_flags = MMC_PM_KEEP_POWER; sdio_flags = MMC_PM_KEEP_POWER;
@ -1149,12 +1258,13 @@ static int brcmf_ops_sdio_resume(struct device *dev)
{ {
struct brcmf_bus *bus_if = dev_get_drvdata(dev); struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct sdio_func *func = container_of(dev, struct sdio_func, dev);
brcmf_dbg(SDIO, "Enter\n"); brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
if (sdiodev->pdata && sdiodev->pdata->oob_irq_supported) if (func->num != SDIO_FUNC_2)
disable_irq_wake(sdiodev->pdata->oob_irq_nr); return 0;
brcmf_sdio_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
atomic_set(&sdiodev->suspend, false); brcmf_sdiod_freezer_off(sdiodev);
return 0; return 0;
} }

View file

@ -1050,10 +1050,6 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct brcmf_cfg80211_vif *vif,
if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif) if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif)
vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif; vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
/* Arm scan timeout timer */
mod_timer(&cfg->escan_timeout, jiffies +
WL_ESCAN_TIMER_INTERVAL_MS * HZ / 1000);
escan_req = false; escan_req = false;
if (request) { if (request) {
/* scan bss */ /* scan bss */
@ -1112,12 +1108,14 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct brcmf_cfg80211_vif *vif,
} }
} }
/* Arm scan timeout timer */
mod_timer(&cfg->escan_timeout, jiffies +
WL_ESCAN_TIMER_INTERVAL_MS * HZ / 1000);
return 0; return 0;
scan_out: scan_out:
clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status); clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
if (timer_pending(&cfg->escan_timeout))
del_timer_sync(&cfg->escan_timeout);
cfg->scan_request = NULL; cfg->scan_request = NULL;
return err; return err;
} }
@ -2252,7 +2250,6 @@ brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
if (key_idx >= BRCMF_MAX_DEFAULT_KEYS) { if (key_idx >= BRCMF_MAX_DEFAULT_KEYS) {
/* we ignore this key index in this case */ /* we ignore this key index in this case */
brcmf_err("invalid key index (%d)\n", key_idx);
return -EINVAL; return -EINVAL;
} }
@ -4272,7 +4269,7 @@ brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
return -EIO; return -EIO;
memcpy(&scbval.ea, params->mac, ETH_ALEN); memcpy(&scbval.ea, params->mac, ETH_ALEN);
scbval.val = cpu_to_le32(WLAN_REASON_DEAUTH_LEAVING); scbval.val = cpu_to_le32(params->reason_code);
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON, err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON,
&scbval, sizeof(scbval)); &scbval, sizeof(scbval));
if (err) if (err)

View file

@ -944,6 +944,34 @@ int brcmf_attach(struct device *dev)
return ret; return ret;
} }
static int brcmf_revinfo_read(struct seq_file *s, void *data)
{
struct brcmf_bus *bus_if = dev_get_drvdata(s->private);
struct brcmf_rev_info *ri = &bus_if->drvr->revinfo;
char drev[BRCMU_DOTREV_LEN];
char brev[BRCMU_BOARDREV_LEN];
seq_printf(s, "vendorid: 0x%04x\n", ri->vendorid);
seq_printf(s, "deviceid: 0x%04x\n", ri->deviceid);
seq_printf(s, "radiorev: %s\n", brcmu_dotrev_str(ri->radiorev, drev));
seq_printf(s, "chipnum: %u (%x)\n", ri->chipnum, ri->chipnum);
seq_printf(s, "chiprev: %u\n", ri->chiprev);
seq_printf(s, "chippkg: %u\n", ri->chippkg);
seq_printf(s, "corerev: %u\n", ri->corerev);
seq_printf(s, "boardid: 0x%04x\n", ri->boardid);
seq_printf(s, "boardvendor: 0x%04x\n", ri->boardvendor);
seq_printf(s, "boardrev: %s\n", brcmu_boardrev_str(ri->boardrev, brev));
seq_printf(s, "driverrev: %s\n", brcmu_dotrev_str(ri->driverrev, drev));
seq_printf(s, "ucoderev: %u\n", ri->ucoderev);
seq_printf(s, "bus: %u\n", ri->bus);
seq_printf(s, "phytype: %u\n", ri->phytype);
seq_printf(s, "phyrev: %u\n", ri->phyrev);
seq_printf(s, "anarev: %u\n", ri->anarev);
seq_printf(s, "nvramrev: %08x\n", ri->nvramrev);
return 0;
}
int brcmf_bus_start(struct device *dev) int brcmf_bus_start(struct device *dev)
{ {
int ret = -1; int ret = -1;
@ -974,6 +1002,8 @@ int brcmf_bus_start(struct device *dev)
if (ret < 0) if (ret < 0)
goto fail; goto fail;
brcmf_debugfs_add_entry(drvr, "revinfo", brcmf_revinfo_read);
/* assure we have chipid before feature attach */ /* assure we have chipid before feature attach */
if (!bus_if->chip) { if (!bus_if->chip) {
bus_if->chip = drvr->revinfo.chipnum; bus_if->chip = drvr->revinfo.chipnum;
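For reference, the revinfo entry registered above with brcmf_debugfs_add_entry() can simply be read from user space once debugfs is mounted. The sketch below is illustrative only and not part of this series; the debugfs mount point and the per-device directory name are assumptions that will differ between systems.

/* Illustrative user-space reader for the new revinfo debugfs entry.
 * The path is an assumption (debugfs mounted at /sys/kernel/debug plus a
 * made-up device directory); adjust it for the actual system.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/brcmfmac/mmc1:0001:1/revinfo";
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* one "name: value" pair per line */
	fclose(f);
	return EXIT_SUCCESS;
}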

View file

@ -515,6 +515,7 @@ struct brcmf_sdio {
bool txoff; /* Transmit flow-controlled */ bool txoff; /* Transmit flow-controlled */
struct brcmf_sdio_count sdcnt; struct brcmf_sdio_count sdcnt;
bool sr_enabled; /* SaveRestore enabled */ bool sr_enabled; /* SaveRestore enabled */
bool sleeping;
u8 tx_hdrlen; /* sdio bus header length for tx packet */ u8 tx_hdrlen; /* sdio bus header length for tx packet */
bool txglom; /* host tx glomming enable flag */ bool txglom; /* host tx glomming enable flag */
@ -1013,12 +1014,12 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
brcmf_dbg(SDIO, "Enter: request %s currently %s\n", brcmf_dbg(SDIO, "Enter: request %s currently %s\n",
(sleep ? "SLEEP" : "WAKE"), (sleep ? "SLEEP" : "WAKE"),
(bus->sdiodev->sleeping ? "SLEEP" : "WAKE")); (bus->sleeping ? "SLEEP" : "WAKE"));
/* If SR is enabled control bus state with KSO */ /* If SR is enabled control bus state with KSO */
if (bus->sr_enabled) { if (bus->sr_enabled) {
/* Done if we're already in the requested state */ /* Done if we're already in the requested state */
if (sleep == bus->sdiodev->sleeping) if (sleep == bus->sleeping)
goto end; goto end;
/* Going to sleep */ /* Going to sleep */
@ -1026,6 +1027,7 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
/* Don't sleep if something is pending */ /* Don't sleep if something is pending */
if (atomic_read(&bus->intstatus) || if (atomic_read(&bus->intstatus) ||
atomic_read(&bus->ipend) > 0 || atomic_read(&bus->ipend) > 0 ||
bus->ctrl_frame_stat ||
(!atomic_read(&bus->fcstate) && (!atomic_read(&bus->fcstate) &&
brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
data_ok(bus))) { data_ok(bus))) {
@ -1065,9 +1067,7 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
} else { } else {
brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok); brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok);
} }
bus->sdiodev->sleeping = sleep; bus->sleeping = sleep;
if (sleep)
wake_up(&bus->sdiodev->idle_wait);
brcmf_dbg(SDIO, "new state %s\n", brcmf_dbg(SDIO, "new state %s\n",
(sleep ? "SLEEP" : "WAKE")); (sleep ? "SLEEP" : "WAKE"));
done: done:
@ -1909,7 +1909,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
bus->rxpending = true; bus->rxpending = true;
for (rd->seq_num = bus->rx_seq, rxleft = maxframes; for (rd->seq_num = bus->rx_seq, rxleft = maxframes;
!bus->rxskip && rxleft && bus->sdiodev->state == BRCMF_STATE_DATA; !bus->rxskip && rxleft && bus->sdiodev->state == BRCMF_SDIOD_DATA;
rd->seq_num++, rxleft--) { rd->seq_num++, rxleft--) {
/* Handle glomming separately */ /* Handle glomming separately */
@ -2415,7 +2415,7 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
} }
/* Deflow-control stack if needed */ /* Deflow-control stack if needed */
if ((bus->sdiodev->state == BRCMF_STATE_DATA) && if ((bus->sdiodev->state == BRCMF_SDIOD_DATA) &&
bus->txoff && (pktq_len(&bus->txq) < TXLOW)) { bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
bus->txoff = false; bus->txoff = false;
brcmf_txflowblock(bus->sdiodev->dev, false); brcmf_txflowblock(bus->sdiodev->dev, false);
@ -2503,7 +2503,7 @@ static void brcmf_sdio_bus_stop(struct device *dev)
bus->watchdog_tsk = NULL; bus->watchdog_tsk = NULL;
} }
if (sdiodev->state != BRCMF_STATE_NOMEDIUM) { if (sdiodev->state != BRCMF_SDIOD_NOMEDIUM) {
sdio_claim_host(sdiodev->func[1]); sdio_claim_host(sdiodev->func[1]);
/* Enable clock for device interrupts */ /* Enable clock for device interrupts */
@ -2603,21 +2603,6 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
return ret; return ret;
} }
static int brcmf_sdio_pm_resume_wait(struct brcmf_sdio_dev *sdiodev)
{
#ifdef CONFIG_PM_SLEEP
int retry;
/* Wait for possible resume to complete */
retry = 0;
while ((atomic_read(&sdiodev->suspend)) && (retry++ != 50))
msleep(20);
if (atomic_read(&sdiodev->suspend))
return -EIO;
#endif
return 0;
}
static void brcmf_sdio_dpc(struct brcmf_sdio *bus) static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
{ {
u32 newstatus = 0; u32 newstatus = 0;
@ -2628,9 +2613,6 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
brcmf_dbg(TRACE, "Enter\n"); brcmf_dbg(TRACE, "Enter\n");
if (brcmf_sdio_pm_resume_wait(bus->sdiodev))
return;
sdio_claim_host(bus->sdiodev->func[1]); sdio_claim_host(bus->sdiodev->func[1]);
/* If waiting for HTAVAIL, check status */ /* If waiting for HTAVAIL, check status */
@ -2755,7 +2737,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
brcmf_sdio_sendfromq(bus, framecnt); brcmf_sdio_sendfromq(bus, framecnt);
} }
if ((bus->sdiodev->state != BRCMF_STATE_DATA) || (err != 0)) { if ((bus->sdiodev->state != BRCMF_SDIOD_DATA) || (err != 0)) {
brcmf_err("failed backplane access over SDIO, halting operation\n"); brcmf_err("failed backplane access over SDIO, halting operation\n");
atomic_set(&bus->intstatus, 0); atomic_set(&bus->intstatus, 0);
} else if (atomic_read(&bus->intstatus) || } else if (atomic_read(&bus->intstatus) ||
@ -2862,11 +2844,7 @@ static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
qcount[prec] = pktq_plen(&bus->txq, prec); qcount[prec] = pktq_plen(&bus->txq, prec);
#endif #endif
if (atomic_read(&bus->dpc_tskcnt) == 0) { brcmf_sdio_trigger_dpc(bus);
atomic_inc(&bus->dpc_tskcnt);
queue_work(bus->brcmf_wq, &bus->datawork);
}
return ret; return ret;
} }
@ -2964,11 +2942,8 @@ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
bus->ctrl_frame_buf = msg; bus->ctrl_frame_buf = msg;
bus->ctrl_frame_len = msglen; bus->ctrl_frame_len = msglen;
bus->ctrl_frame_stat = true; bus->ctrl_frame_stat = true;
if (atomic_read(&bus->dpc_tskcnt) == 0) {
atomic_inc(&bus->dpc_tskcnt);
queue_work(bus->brcmf_wq, &bus->datawork);
}
brcmf_sdio_trigger_dpc(bus);
wait_event_interruptible_timeout(bus->ctrl_wait, !bus->ctrl_frame_stat, wait_event_interruptible_timeout(bus->ctrl_wait, !bus->ctrl_frame_stat,
msecs_to_jiffies(CTL_DONE_TIMEOUT)); msecs_to_jiffies(CTL_DONE_TIMEOUT));
@ -3411,7 +3386,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
} }
/* Allow full data communication using DPC from now on. */ /* Allow full data communication using DPC from now on. */
bus->sdiodev->state = BRCMF_STATE_DATA; brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);
bcmerror = 0; bcmerror = 0;
err: err:
@ -3548,6 +3523,14 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
return err; return err;
} }
void brcmf_sdio_trigger_dpc(struct brcmf_sdio *bus)
{
if (atomic_read(&bus->dpc_tskcnt) == 0) {
atomic_inc(&bus->dpc_tskcnt);
queue_work(bus->brcmf_wq, &bus->datawork);
}
}
void brcmf_sdio_isr(struct brcmf_sdio *bus) void brcmf_sdio_isr(struct brcmf_sdio *bus)
{ {
brcmf_dbg(TRACE, "Enter\n"); brcmf_dbg(TRACE, "Enter\n");
@ -3557,7 +3540,7 @@ void brcmf_sdio_isr(struct brcmf_sdio *bus)
return; return;
} }
if (bus->sdiodev->state != BRCMF_STATE_DATA) { if (bus->sdiodev->state != BRCMF_SDIOD_DATA) {
brcmf_err("bus is down. we have nothing to do\n"); brcmf_err("bus is down. we have nothing to do\n");
return; return;
} }
@ -3602,9 +3585,8 @@ static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
SDIO_CCCR_INTx, SDIO_CCCR_INTx,
NULL); NULL);
sdio_release_host(bus->sdiodev->func[1]); sdio_release_host(bus->sdiodev->func[1]);
intstatus = intstatus = devpend & (INTR_STATUS_FUNC1 |
devpend & (INTR_STATUS_FUNC1 | INTR_STATUS_FUNC2);
INTR_STATUS_FUNC2);
} }
/* If there is something, make like the ISR and /* If there is something, make like the ISR and
@ -3623,7 +3605,7 @@ static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
} }
#ifdef DEBUG #ifdef DEBUG
/* Poll for console output periodically */ /* Poll for console output periodically */
if (bus->sdiodev->state == BRCMF_STATE_DATA && if (bus->sdiodev->state == BRCMF_SDIOD_DATA &&
bus->console_interval != 0) { bus->console_interval != 0) {
bus->console.count += BRCMF_WD_POLL_MS; bus->console.count += BRCMF_WD_POLL_MS;
if (bus->console.count >= bus->console_interval) { if (bus->console.count >= bus->console_interval) {
@ -3667,6 +3649,11 @@ static void brcmf_sdio_dataworker(struct work_struct *work)
atomic_set(&bus->dpc_tskcnt, 0); atomic_set(&bus->dpc_tskcnt, 0);
brcmf_sdio_dpc(bus); brcmf_sdio_dpc(bus);
} }
if (brcmf_sdiod_freezing(bus->sdiodev)) {
brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DOWN);
brcmf_sdiod_try_freeze(bus->sdiodev);
brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);
}
} }
static void static void
@ -3944,13 +3931,19 @@ static int
brcmf_sdio_watchdog_thread(void *data) brcmf_sdio_watchdog_thread(void *data)
{ {
struct brcmf_sdio *bus = (struct brcmf_sdio *)data; struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
int wait;
allow_signal(SIGTERM); allow_signal(SIGTERM);
/* Run until signal received */ /* Run until signal received */
brcmf_sdiod_freezer_count(bus->sdiodev);
while (1) { while (1) {
if (kthread_should_stop()) if (kthread_should_stop())
break; break;
if (!wait_for_completion_interruptible(&bus->watchdog_wait)) { brcmf_sdiod_freezer_uncount(bus->sdiodev);
wait = wait_for_completion_interruptible(&bus->watchdog_wait);
brcmf_sdiod_freezer_count(bus->sdiodev);
brcmf_sdiod_try_freeze(bus->sdiodev);
if (!wait) {
brcmf_sdio_bus_watchdog(bus); brcmf_sdio_bus_watchdog(bus);
/* Count the tick for reference */ /* Count the tick for reference */
bus->sdcnt.tickcnt++; bus->sdcnt.tickcnt++;
@ -3971,7 +3964,7 @@ brcmf_sdio_watchdog(unsigned long data)
/* Reschedule the watchdog */ /* Reschedule the watchdog */
if (bus->wd_timer_valid) if (bus->wd_timer_valid)
mod_timer(&bus->timer, mod_timer(&bus->timer,
jiffies + BRCMF_WD_POLL_MS * HZ / 1000); jiffies + msecs_to_jiffies(BRCMF_WD_POLL_MS));
} }
} }
@ -4089,6 +4082,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
{ {
int ret; int ret;
struct brcmf_sdio *bus; struct brcmf_sdio *bus;
struct workqueue_struct *wq;
brcmf_dbg(TRACE, "Enter\n"); brcmf_dbg(TRACE, "Enter\n");
@ -4117,12 +4111,16 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
bus->sgentry_align = sdiodev->pdata->sd_sgentry_align; bus->sgentry_align = sdiodev->pdata->sd_sgentry_align;
} }
INIT_WORK(&bus->datawork, brcmf_sdio_dataworker); /* single-threaded workqueue */
bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq"); wq = alloc_ordered_workqueue("brcmf_wq/%s", WQ_MEM_RECLAIM,
if (bus->brcmf_wq == NULL) { dev_name(&sdiodev->func[1]->dev));
if (!wq) {
brcmf_err("insufficient memory to create txworkqueue\n"); brcmf_err("insufficient memory to create txworkqueue\n");
goto fail; goto fail;
} }
brcmf_sdiod_freezer_count(sdiodev);
INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
bus->brcmf_wq = wq;
/* attempt to attach to the dongle */ /* attempt to attach to the dongle */
if (!(brcmf_sdio_probe_attach(bus))) { if (!(brcmf_sdio_probe_attach(bus))) {
@ -4143,7 +4141,8 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
/* Initialize watchdog thread */ /* Initialize watchdog thread */
init_completion(&bus->watchdog_wait); init_completion(&bus->watchdog_wait);
bus->watchdog_tsk = kthread_run(brcmf_sdio_watchdog_thread, bus->watchdog_tsk = kthread_run(brcmf_sdio_watchdog_thread,
bus, "brcmf_watchdog"); bus, "brcmf_wdog/%s",
dev_name(&sdiodev->func[1]->dev));
if (IS_ERR(bus->watchdog_tsk)) { if (IS_ERR(bus->watchdog_tsk)) {
pr_warn("brcmf_watchdog thread failed to start\n"); pr_warn("brcmf_watchdog thread failed to start\n");
bus->watchdog_tsk = NULL; bus->watchdog_tsk = NULL;
@ -4242,7 +4241,7 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
destroy_workqueue(bus->brcmf_wq); destroy_workqueue(bus->brcmf_wq);
if (bus->ci) { if (bus->ci) {
if (bus->sdiodev->state != BRCMF_STATE_NOMEDIUM) { if (bus->sdiodev->state != BRCMF_SDIOD_NOMEDIUM) {
sdio_claim_host(bus->sdiodev->func[1]); sdio_claim_host(bus->sdiodev->func[1]);
brcmf_sdio_clkctl(bus, CLK_AVAIL, false); brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
/* Leave the device in state where it is /* Leave the device in state where it is
@ -4277,7 +4276,7 @@ void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick)
} }
/* don't start the wd until fw is loaded */ /* don't start the wd until fw is loaded */
if (bus->sdiodev->state != BRCMF_STATE_DATA) if (bus->sdiodev->state != BRCMF_SDIOD_DATA)
return; return;
if (wdtick) { if (wdtick) {
@ -4290,16 +4289,28 @@ void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick)
dynamically changed or in the first instance dynamically changed or in the first instance
*/ */
bus->timer.expires = bus->timer.expires =
jiffies + BRCMF_WD_POLL_MS * HZ / 1000; jiffies + msecs_to_jiffies(BRCMF_WD_POLL_MS);
add_timer(&bus->timer); add_timer(&bus->timer);
} else { } else {
/* Re arm the timer, at last watchdog period */ /* Re arm the timer, at last watchdog period */
mod_timer(&bus->timer, mod_timer(&bus->timer,
jiffies + BRCMF_WD_POLL_MS * HZ / 1000); jiffies + msecs_to_jiffies(BRCMF_WD_POLL_MS));
} }
bus->wd_timer_valid = true; bus->wd_timer_valid = true;
bus->save_ms = wdtick; bus->save_ms = wdtick;
} }
} }
int brcmf_sdio_sleep(struct brcmf_sdio *bus, bool sleep)
{
int ret;
sdio_claim_host(bus->sdiodev->func[1]);
ret = brcmf_sdio_bus_sleep(bus, sleep, false);
sdio_release_host(bus->sdiodev->func[1]);
return ret;
}

View file

@ -155,11 +155,17 @@
/* watchdog polling interval in ms */ /* watchdog polling interval in ms */
#define BRCMF_WD_POLL_MS 10 #define BRCMF_WD_POLL_MS 10
/* The state of the bus */ /**
enum brcmf_sdio_state { * enum brcmf_sdiod_state - the state of the bus.
BRCMF_STATE_DOWN, /* Device available, still initialising */ *
BRCMF_STATE_DATA, /* Ready for data transfers, DPC enabled */ * @BRCMF_SDIOD_DOWN: Device can be accessed, no DPC.
BRCMF_STATE_NOMEDIUM /* No medium access to dongle possible */ * @BRCMF_SDIOD_DATA: Ready for data transfers, DPC enabled.
* @BRCMF_SDIOD_NOMEDIUM: No medium access to dongle possible.
*/
enum brcmf_sdiod_state {
BRCMF_SDIOD_DOWN,
BRCMF_SDIOD_DATA,
BRCMF_SDIOD_NOMEDIUM
}; };
struct brcmf_sdreg { struct brcmf_sdreg {
@ -169,15 +175,13 @@ struct brcmf_sdreg {
}; };
struct brcmf_sdio; struct brcmf_sdio;
struct brcmf_sdiod_freezer;
struct brcmf_sdio_dev { struct brcmf_sdio_dev {
struct sdio_func *func[SDIO_MAX_FUNCS]; struct sdio_func *func[SDIO_MAX_FUNCS];
u8 num_funcs; /* Supported funcs on client */ u8 num_funcs; /* Supported funcs on client */
u32 sbwad; /* Save backplane window address */ u32 sbwad; /* Save backplane window address */
struct brcmf_sdio *bus; struct brcmf_sdio *bus;
atomic_t suspend; /* suspend flag */
bool sleeping;
wait_queue_head_t idle_wait;
struct device *dev; struct device *dev;
struct brcmf_bus *bus_if; struct brcmf_bus *bus_if;
struct brcmfmac_sdio_platform_data *pdata; struct brcmfmac_sdio_platform_data *pdata;
@ -194,7 +198,8 @@ struct brcmf_sdio_dev {
char fw_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN]; char fw_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
char nvram_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN]; char nvram_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
bool wowl_enabled; bool wowl_enabled;
enum brcmf_sdio_state state; enum brcmf_sdiod_state state;
struct brcmf_sdiod_freezer *freezer;
}; };
/* sdio core registers */ /* sdio core registers */
@ -337,6 +342,28 @@ int brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
/* Issue an abort to the specified function */ /* Issue an abort to the specified function */
int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn); int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
enum brcmf_sdiod_state state);
#ifdef CONFIG_PM_SLEEP
bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev);
void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev);
void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev);
void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev);
#else
static inline bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev)
{
return false;
}
static inline void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
{
}
static inline void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev)
{
}
static inline void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev)
{
}
#endif /* CONFIG_PM_SLEEP */
struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev); struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
void brcmf_sdio_remove(struct brcmf_sdio *bus); void brcmf_sdio_remove(struct brcmf_sdio *bus);
@ -344,5 +371,7 @@ void brcmf_sdio_isr(struct brcmf_sdio *bus);
void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick); void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick);
void brcmf_sdio_wowl_config(struct device *dev, bool enabled); void brcmf_sdio_wowl_config(struct device *dev, bool enabled);
int brcmf_sdio_sleep(struct brcmf_sdio *bus, bool sleep);
void brcmf_sdio_trigger_dpc(struct brcmf_sdio *bus);
#endif /* BRCMFMAC_SDIO_H */ #endif /* BRCMFMAC_SDIO_H */

View file

@ -4668,7 +4668,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
brcms_c_coredisable(wlc_hw); brcms_c_coredisable(wlc_hw);
/* Match driver "down" state */ /* Match driver "down" state */
bcma_core_pci_down(wlc_hw->d11core->bus); bcma_host_pci_down(wlc_hw->d11core->bus);
/* turn off pll and xtal to match driver "down" state */ /* turn off pll and xtal to match driver "down" state */
brcms_b_xtal(wlc_hw, OFF); brcms_b_xtal(wlc_hw, OFF);
@ -4959,7 +4959,7 @@ static int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
* Configure pci/pcmcia here instead of in brcms_c_attach() * Configure pci/pcmcia here instead of in brcms_c_attach()
* to allow mfg hotswap: down, hotswap (chip power cycle), up. * to allow mfg hotswap: down, hotswap (chip power cycle), up.
*/ */
bcma_core_pci_irq_ctl(&wlc_hw->d11core->bus->drv_pci[0], wlc_hw->d11core, bcma_core_pci_irq_ctl(wlc_hw->d11core->bus, wlc_hw->d11core,
true); true);
/* /*
@ -4969,12 +4969,12 @@ static int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
*/ */
if (brcms_b_radio_read_hwdisabled(wlc_hw)) { if (brcms_b_radio_read_hwdisabled(wlc_hw)) {
/* put SB PCI in down state again */ /* put SB PCI in down state again */
bcma_core_pci_down(wlc_hw->d11core->bus); bcma_host_pci_down(wlc_hw->d11core->bus);
brcms_b_xtal(wlc_hw, OFF); brcms_b_xtal(wlc_hw, OFF);
return -ENOMEDIUM; return -ENOMEDIUM;
} }
bcma_core_pci_up(wlc_hw->d11core->bus); bcma_host_pci_up(wlc_hw->d11core->bus);
/* reset the d11 core */ /* reset the d11 core */
brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS); brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS);
@ -5171,7 +5171,7 @@ static int brcms_b_down_finish(struct brcms_hardware *wlc_hw)
/* turn off primary xtal and pll */ /* turn off primary xtal and pll */
if (!wlc_hw->noreset) { if (!wlc_hw->noreset) {
bcma_core_pci_down(wlc_hw->d11core->bus); bcma_host_pci_down(wlc_hw->d11core->bus);
brcms_b_xtal(wlc_hw, OFF); brcms_b_xtal(wlc_hw, OFF);
} }
} }

View file

@ -23041,10 +23041,7 @@ static void wlc_phy_rssi_cal_nphy_rev2(struct brcms_phy *pi, u8 rssi_type)
else if (rssi_ctrl_state[0] == RADIO_2055_WBRSSI_G1_SEL) else if (rssi_ctrl_state[0] == RADIO_2055_WBRSSI_G1_SEL)
wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE1, wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE1,
NPHY_RSSI_SEL_W1); NPHY_RSSI_SEL_W1);
else if (rssi_ctrl_state[0] == RADIO_2055_WBRSSI_G2_SEL) else /* RADIO_2055_WBRSSI_G2_SEL */
wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE1,
NPHY_RSSI_SEL_W2);
else
wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE1, wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE1,
NPHY_RSSI_SEL_W2); NPHY_RSSI_SEL_W2);
if (rssi_ctrl_state[1] == RADIO_2055_NBRSSI_SEL) if (rssi_ctrl_state[1] == RADIO_2055_NBRSSI_SEL)
@ -23053,13 +23050,9 @@ static void wlc_phy_rssi_cal_nphy_rev2(struct brcms_phy *pi, u8 rssi_type)
else if (rssi_ctrl_state[1] == RADIO_2055_WBRSSI_G1_SEL) else if (rssi_ctrl_state[1] == RADIO_2055_WBRSSI_G1_SEL)
wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE2, wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE2,
NPHY_RSSI_SEL_W1); NPHY_RSSI_SEL_W1);
else if (rssi_ctrl_state[1] == RADIO_2055_WBRSSI_G2_SEL) else /* RADIO_2055_WBRSSI_G2_SEL */
wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE2, wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE2,
NPHY_RSSI_SEL_W2); NPHY_RSSI_SEL_W2);
else
wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_CORE2,
NPHY_RSSI_SEL_W2);
wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_OFF, rssi_type); wlc_phy_rssisel_nphy(pi, RADIO_MIMO_CORESEL_OFF, rssi_type);
write_phy_reg(pi, 0x91, rfctrlintc_state[0]); write_phy_reg(pi, 0x91, rfctrlintc_state[0]);

View file

@ -1678,7 +1678,7 @@ il4965_rs_stay_in_table(struct il_lq_sta *lq_sta, bool force_search)
lq_sta->total_success > lq_sta->max_success_limit || lq_sta->total_success > lq_sta->max_success_limit ||
(!lq_sta->search_better_tbl && lq_sta->flush_timer && (!lq_sta->search_better_tbl && lq_sta->flush_timer &&
flush_interval_passed)) { flush_interval_passed)) {
D_RATE("LQ: stay is expired %d %d %d\n:", D_RATE("LQ: stay is expired %d %d %d\n",
lq_sta->total_failed, lq_sta->total_success, lq_sta->total_failed, lq_sta->total_success,
flush_interval_passed); flush_interval_passed);

View file

@ -1549,7 +1549,7 @@ static void iwl_dump_nic_error_log(struct iwl_priv *priv)
table.blink1, table.blink2, table.ilink1, table.blink1, table.blink2, table.ilink1,
table.ilink2, table.bcon_time, table.gp1, table.ilink2, table.bcon_time, table.gp1,
table.gp2, table.gp3, table.ucode_ver, table.gp2, table.gp3, table.ucode_ver,
table.hw_ver, table.brd_ver); table.hw_ver, 0, table.brd_ver);
IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id, IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id,
desc_lookup(table.error_id)); desc_lookup(table.error_id));
IWL_ERR(priv, "0x%08X | uPc\n", table.pc); IWL_ERR(priv, "0x%08X | uPc\n", table.pc);

View file

@ -77,8 +77,8 @@
#define IWL3160_UCODE_API_OK 10 #define IWL3160_UCODE_API_OK 10
/* Lowest firmware API version supported */ /* Lowest firmware API version supported */
#define IWL7260_UCODE_API_MIN 9 #define IWL7260_UCODE_API_MIN 10
#define IWL3160_UCODE_API_MIN 9 #define IWL3160_UCODE_API_MIN 10
/* NVM versions */ /* NVM versions */
#define IWL7260_NVM_VERSION 0x0a1d #define IWL7260_NVM_VERSION 0x0a1d

View file

@ -75,7 +75,7 @@
#define IWL8000_UCODE_API_OK 10 #define IWL8000_UCODE_API_OK 10
/* Lowest firmware API version supported */ /* Lowest firmware API version supported */
#define IWL8000_UCODE_API_MIN 9 #define IWL8000_UCODE_API_MIN 10
/* NVM versions */ /* NVM versions */
#define IWL8000_NVM_VERSION 0x0a1d #define IWL8000_NVM_VERSION 0x0a1d

View file

@ -431,11 +431,11 @@ TRACE_EVENT(iwlwifi_dev_ucode_error,
TP_PROTO(const struct device *dev, u32 desc, u32 tsf_low, TP_PROTO(const struct device *dev, u32 desc, u32 tsf_low,
u32 data1, u32 data2, u32 line, u32 blink1, u32 data1, u32 data2, u32 line, u32 blink1,
u32 blink2, u32 ilink1, u32 ilink2, u32 bcon_time, u32 blink2, u32 ilink1, u32 ilink2, u32 bcon_time,
u32 gp1, u32 gp2, u32 gp3, u32 ucode_ver, u32 hw_ver, u32 gp1, u32 gp2, u32 gp3, u32 major, u32 minor, u32 hw_ver,
u32 brd_ver), u32 brd_ver),
TP_ARGS(dev, desc, tsf_low, data1, data2, line, TP_ARGS(dev, desc, tsf_low, data1, data2, line,
blink1, blink2, ilink1, ilink2, bcon_time, gp1, gp2, blink1, blink2, ilink1, ilink2, bcon_time, gp1, gp2,
gp3, ucode_ver, hw_ver, brd_ver), gp3, major, minor, hw_ver, brd_ver),
TP_STRUCT__entry( TP_STRUCT__entry(
DEV_ENTRY DEV_ENTRY
__field(u32, desc) __field(u32, desc)
@ -451,7 +451,8 @@ TRACE_EVENT(iwlwifi_dev_ucode_error,
__field(u32, gp1) __field(u32, gp1)
__field(u32, gp2) __field(u32, gp2)
__field(u32, gp3) __field(u32, gp3)
__field(u32, ucode_ver) __field(u32, major)
__field(u32, minor)
__field(u32, hw_ver) __field(u32, hw_ver)
__field(u32, brd_ver) __field(u32, brd_ver)
), ),
@ -470,21 +471,22 @@ TRACE_EVENT(iwlwifi_dev_ucode_error,
__entry->gp1 = gp1; __entry->gp1 = gp1;
__entry->gp2 = gp2; __entry->gp2 = gp2;
__entry->gp3 = gp3; __entry->gp3 = gp3;
__entry->ucode_ver = ucode_ver; __entry->major = major;
__entry->minor = minor;
__entry->hw_ver = hw_ver; __entry->hw_ver = hw_ver;
__entry->brd_ver = brd_ver; __entry->brd_ver = brd_ver;
), ),
TP_printk("[%s] #%02d %010u data 0x%08X 0x%08X line %u, " TP_printk("[%s] #%02d %010u data 0x%08X 0x%08X line %u, "
"blink 0x%05X 0x%05X ilink 0x%05X 0x%05X " "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X "
"bcon_tm %010u gp 0x%08X 0x%08X 0x%08X uCode 0x%08X " "bcon_tm %010u gp 0x%08X 0x%08X 0x%08X major 0x%08X "
"hw 0x%08X brd 0x%08X", "minor 0x%08X hw 0x%08X brd 0x%08X",
__get_str(dev), __entry->desc, __entry->tsf_low, __get_str(dev), __entry->desc, __entry->tsf_low,
__entry->data1, __entry->data1,
__entry->data2, __entry->line, __entry->blink1, __entry->data2, __entry->line, __entry->blink1,
__entry->blink2, __entry->ilink1, __entry->ilink2, __entry->blink2, __entry->ilink1, __entry->ilink2,
__entry->bcon_time, __entry->gp1, __entry->gp2, __entry->bcon_time, __entry->gp1, __entry->gp2,
__entry->gp3, __entry->ucode_ver, __entry->hw_ver, __entry->gp3, __entry->major, __entry->minor,
__entry->brd_ver) __entry->hw_ver, __entry->brd_ver)
); );
TRACE_EVENT(iwlwifi_dev_ucode_event, TRACE_EVENT(iwlwifi_dev_ucode_event,

View file

@ -175,6 +175,8 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
kfree(drv->fw.dbg_dest_tlv); kfree(drv->fw.dbg_dest_tlv);
for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++) for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++)
kfree(drv->fw.dbg_conf_tlv[i]); kfree(drv->fw.dbg_conf_tlv[i]);
for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++)
kfree(drv->fw.dbg_trigger_tlv[i]);
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
iwl_free_fw_img(drv, drv->fw.img + i); iwl_free_fw_img(drv, drv->fw.img + i);
@ -293,8 +295,10 @@ struct iwl_firmware_pieces {
/* FW debug data parsed for driver usage */ /* FW debug data parsed for driver usage */
struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv; struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX]; struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
size_t dbg_conf_tlv_len[FW_DBG_MAX]; size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
}; };
/* /*
@ -842,6 +846,23 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
capa->n_scan_channels = capa->n_scan_channels =
le32_to_cpup((__le32 *)tlv_data); le32_to_cpup((__le32 *)tlv_data);
break; break;
case IWL_UCODE_TLV_FW_VERSION: {
__le32 *ptr = (void *)tlv_data;
u32 major, minor;
u8 local_comp;
if (tlv_len != sizeof(u32) * 3)
goto invalid_tlv_len;
major = le32_to_cpup(ptr++);
minor = le32_to_cpup(ptr++);
local_comp = le32_to_cpup(ptr);
snprintf(drv->fw.fw_version,
sizeof(drv->fw.fw_version), "%u.%u.%u",
major, minor, local_comp);
break;
}
case IWL_UCODE_TLV_FW_DBG_DEST: { case IWL_UCODE_TLV_FW_DBG_DEST: {
struct iwl_fw_dbg_dest_tlv *dest = (void *)tlv_data; struct iwl_fw_dbg_dest_tlv *dest = (void *)tlv_data;
@ -897,6 +918,31 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
pieces->dbg_conf_tlv_len[conf->id] = tlv_len; pieces->dbg_conf_tlv_len[conf->id] = tlv_len;
break; break;
} }
case IWL_UCODE_TLV_FW_DBG_TRIGGER: {
struct iwl_fw_dbg_trigger_tlv *trigger =
(void *)tlv_data;
u32 trigger_id = le32_to_cpu(trigger->id);
if (trigger_id >= ARRAY_SIZE(drv->fw.dbg_trigger_tlv)) {
IWL_ERR(drv,
"Skip unknown trigger: %u\n",
trigger->id);
break;
}
if (pieces->dbg_trigger_tlv[trigger_id]) {
IWL_ERR(drv,
"Ignore duplicate dbg trigger %u\n",
trigger->id);
break;
}
IWL_INFO(drv, "Found debug trigger: %u\n", trigger->id);
pieces->dbg_trigger_tlv[trigger_id] = trigger;
pieces->dbg_trigger_tlv_len[trigger_id] = tlv_len;
break;
}
case IWL_UCODE_TLV_SEC_RT_USNIFFER: case IWL_UCODE_TLV_SEC_RT_USNIFFER:
usniffer_images = true; usniffer_images = true;
iwl_store_ucode_sec(pieces, tlv_data, iwl_store_ucode_sec(pieces, tlv_data,
@ -1107,7 +1153,10 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
if (err) if (err)
goto try_again; goto try_again;
api_ver = IWL_UCODE_API(drv->fw.ucode_ver); if (drv->fw.ucode_capa.api[0] & IWL_UCODE_TLV_API_NEW_VERSION)
api_ver = drv->fw.ucode_ver;
else
api_ver = IWL_UCODE_API(drv->fw.ucode_ver);
/* /*
* api_ver should match the api version forming part of the * api_ver should match the api version forming part of the
@ -1178,6 +1227,19 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
} }
} }
for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) {
if (pieces->dbg_trigger_tlv[i]) {
drv->fw.dbg_trigger_tlv_len[i] =
pieces->dbg_trigger_tlv_len[i];
drv->fw.dbg_trigger_tlv[i] =
kmemdup(pieces->dbg_trigger_tlv[i],
drv->fw.dbg_trigger_tlv_len[i],
GFP_KERNEL);
if (!drv->fw.dbg_trigger_tlv[i])
goto out_free_fw;
}
}
/* Now that we can no longer fail, copy information */ /* Now that we can no longer fail, copy information */
/* /*

View file

@ -82,6 +82,8 @@
* sections like this in a single file. * sections like this in a single file.
* @IWL_FW_ERROR_DUMP_FH_REGS: range of FH registers * @IWL_FW_ERROR_DUMP_FH_REGS: range of FH registers
* @IWL_FW_ERROR_DUMP_MEM: chunk of memory * @IWL_FW_ERROR_DUMP_MEM: chunk of memory
* @IWL_FW_ERROR_DUMP_ERROR_INFO: description of what triggered this dump.
* Structured as &struct iwl_fw_error_dump_trigger_desc.
*/ */
enum iwl_fw_error_dump_type { enum iwl_fw_error_dump_type {
/* 0 is deprecated */ /* 0 is deprecated */
@ -94,6 +96,7 @@ enum iwl_fw_error_dump_type {
IWL_FW_ERROR_DUMP_TXF = 7, IWL_FW_ERROR_DUMP_TXF = 7,
IWL_FW_ERROR_DUMP_FH_REGS = 8, IWL_FW_ERROR_DUMP_FH_REGS = 8,
IWL_FW_ERROR_DUMP_MEM = 9, IWL_FW_ERROR_DUMP_MEM = 9,
IWL_FW_ERROR_DUMP_ERROR_INFO = 10,
IWL_FW_ERROR_DUMP_MAX, IWL_FW_ERROR_DUMP_MAX,
}; };
@ -230,4 +233,47 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
return (void *)(data->data + le32_to_cpu(data->len)); return (void *)(data->data + le32_to_cpu(data->len));
} }
/**
* enum iwl_fw_dbg_trigger - triggers available
*
* @FW_DBG_TRIGGER_USER: trigger log collection by user
* This should not be defined as a trigger to the driver, but a value the
* driver should set to indicate that the trigger was initiated by the
* user.
* @FW_DBG_TRIGGER_FW_ASSERT: trigger log collection when the firmware asserts
* @FW_DBG_TRIGGER_MISSED_BEACONS: trigger log collection when beacons are
* missed.
* @FW_DBG_TRIGGER_CHANNEL_SWITCH: trigger log collection upon channel switch.
* @FW_DBG_TRIGGER_FW_NOTIF: trigger log collection when the firmware sends a
* command response or a notification.
* @FW_DB_TRIGGER_RESERVED: reserved
* @FW_DBG_TRIGGER_STATS: trigger log collection upon statistics threshold.
* @FW_DBG_TRIGGER_RSSI: trigger log collection when the rssi of the beacon
* goes below a threshold.
*/
enum iwl_fw_dbg_trigger {
FW_DBG_TRIGGER_INVALID = 0,
FW_DBG_TRIGGER_USER,
FW_DBG_TRIGGER_FW_ASSERT,
FW_DBG_TRIGGER_MISSED_BEACONS,
FW_DBG_TRIGGER_CHANNEL_SWITCH,
FW_DBG_TRIGGER_FW_NOTIF,
FW_DB_TRIGGER_RESERVED,
FW_DBG_TRIGGER_STATS,
FW_DBG_TRIGGER_RSSI,
/* must be last */
FW_DBG_TRIGGER_MAX,
};
/**
* struct iwl_fw_error_dump_trigger_desc - describes the trigger condition
* @type: %enum iwl_fw_dbg_trigger
* @data: raw data about what happened
*/
struct iwl_fw_error_dump_trigger_desc {
__le32 type;
u8 data[];
};
#endif /* __fw_error_dump_h__ */ #endif /* __fw_error_dump_h__ */
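To show how the new IWL_FW_ERROR_DUMP_ERROR_INFO block would be consumed, the sketch below walks the dump data blocks the same way iwl_fw_error_next_data() does and prints the trigger type. It is not part of this series: the struct layouts are simplified host-endian copies of the ones in this header (struct iwl_fw_error_dump_data is assumed to be { type, len, data[] }, as declared earlier in the file outside this hunk), and a little-endian host is assumed so the __le32 conversions can be dropped.

/* User-space sketch: find the trigger description in a dump buffer.
 * Simplified, host-endian copies of the iwl-fw-error-dump.h structs;
 * only the fields needed here are reproduced (assumption).
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define IWL_FW_ERROR_DUMP_ERROR_INFO	10	/* from the enum above */

struct dump_data {		/* ~ struct iwl_fw_error_dump_data */
	uint32_t type;
	uint32_t len;
	uint8_t data[];
};

struct trigger_desc {		/* ~ struct iwl_fw_error_dump_trigger_desc */
	uint32_t type;		/* enum iwl_fw_dbg_trigger */
	uint8_t data[];
};

static void print_trigger(const uint8_t *buf, size_t buf_len)
{
	size_t off = 0;

	while (off + sizeof(struct dump_data) <= buf_len) {
		const struct dump_data *d = (const void *)(buf + off);

		if (d->type == IWL_FW_ERROR_DUMP_ERROR_INFO) {
			const struct trigger_desc *desc =
				(const void *)d->data;

			printf("dump triggered by: %u\n", desc->type);
			return;
		}
		/* equivalent of iwl_fw_error_next_data() */
		off += sizeof(*d) + d->len;
	}
	printf("no trigger description found\n");
}

int main(void)
{
	/* fabricated dump: one ERROR_INFO block (type 10, len 4) whose
	 * trigger value is 3, i.e. FW_DBG_TRIGGER_MISSED_BEACONS above */
	uint32_t words[] = { IWL_FW_ERROR_DUMP_ERROR_INFO, 4, 3 };

	print_trigger((const uint8_t *)words, sizeof(words));
	return 0;
}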

View file

@ -66,6 +66,7 @@
#define __iwl_fw_file_h__ #define __iwl_fw_file_h__
#include <linux/netdevice.h> #include <linux/netdevice.h>
#include <linux/nl80211.h>
/* v1/v2 uCode file layout */ /* v1/v2 uCode file layout */
struct iwl_ucode_header { struct iwl_ucode_header {
@ -133,8 +134,10 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_N_SCAN_CHANNELS = 31, IWL_UCODE_TLV_N_SCAN_CHANNELS = 31,
IWL_UCODE_TLV_SEC_RT_USNIFFER = 34, IWL_UCODE_TLV_SEC_RT_USNIFFER = 34,
IWL_UCODE_TLV_SDIO_ADMA_ADDR = 35, IWL_UCODE_TLV_SDIO_ADMA_ADDR = 35,
IWL_UCODE_TLV_FW_VERSION = 36,
IWL_UCODE_TLV_FW_DBG_DEST = 38, IWL_UCODE_TLV_FW_DBG_DEST = 38,
IWL_UCODE_TLV_FW_DBG_CONF = 39, IWL_UCODE_TLV_FW_DBG_CONF = 39,
IWL_UCODE_TLV_FW_DBG_TRIGGER = 40,
}; };
struct iwl_ucode_tlv { struct iwl_ucode_tlv {
@ -156,7 +159,8 @@ struct iwl_tlv_ucode_header {
__le32 zero; __le32 zero;
__le32 magic; __le32 magic;
u8 human_readable[FW_VER_HUMAN_READABLE_SZ]; u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
__le32 ver; /* major/minor/API/serial */ /* major/minor/API/serial or major in new format */
__le32 ver;
__le32 build; __le32 build;
__le64 ignore; __le64 ignore;
/* /*
@ -237,7 +241,6 @@ enum iwl_ucode_tlv_flag {
* enum iwl_ucode_tlv_api - ucode api * enum iwl_ucode_tlv_api - ucode api
* @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
* @IWL_UCODE_TLV_API_DISABLE_STA_TX: ucode supports tx_disable bit. * @IWL_UCODE_TLV_API_DISABLE_STA_TX: ucode supports tx_disable bit.
* @IWL_UCODE_TLV_API_LMAC_SCAN: This ucode uses LMAC unified scan API.
* @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif. * @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif.
* @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
* longer than the passive one, which is essential for fragmented scan. * longer than the passive one, which is essential for fragmented scan.
@ -250,11 +253,12 @@ enum iwl_ucode_tlv_flag {
* @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too. * @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too.
* @IWL_UCODE_TLV_API_ASYNC_DTM: Async temperature notifications are supported. * @IWL_UCODE_TLV_API_ASYNC_DTM: Async temperature notifications are supported.
* @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params * @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params
* @IWL_UCODE_TLV_API_STATS_V10: uCode supports/uses statistics API version 10
* @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format
*/ */
enum iwl_ucode_tlv_api { enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_BT_COEX_SPLIT = BIT(3), IWL_UCODE_TLV_API_BT_COEX_SPLIT = BIT(3),
IWL_UCODE_TLV_API_DISABLE_STA_TX = BIT(5), IWL_UCODE_TLV_API_DISABLE_STA_TX = BIT(5),
IWL_UCODE_TLV_API_LMAC_SCAN = BIT(6),
IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7), IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7),
IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8), IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8),
IWL_UCODE_TLV_API_HDC_PHASE_0 = BIT(10), IWL_UCODE_TLV_API_HDC_PHASE_0 = BIT(10),
@ -263,6 +267,8 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = BIT(16), IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = BIT(16),
IWL_UCODE_TLV_API_ASYNC_DTM = BIT(17), IWL_UCODE_TLV_API_ASYNC_DTM = BIT(17),
IWL_UCODE_TLV_API_LQ_SS_PARAMS = BIT(18), IWL_UCODE_TLV_API_LQ_SS_PARAMS = BIT(18),
IWL_UCODE_TLV_API_STATS_V10 = BIT(19),
IWL_UCODE_TLV_API_NEW_VERSION = BIT(20),
}; };
/** /**
@ -284,6 +290,8 @@ enum iwl_ucode_tlv_api {
* which also implies support for the scheduler configuration command * which also implies support for the scheduler configuration command
* @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
* @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
* @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
* @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
*/ */
enum iwl_ucode_tlv_capa { enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0), IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0),
@ -298,6 +306,8 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12), IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12),
IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = BIT(13), IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = BIT(13),
IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = BIT(18), IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = BIT(18),
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = BIT(22),
IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = BIT(28),
}; };
/* The default calibrate table size if not specified by firmware file */ /* The default calibrate table size if not specified by firmware file */
@ -450,44 +460,129 @@ struct iwl_fw_dbg_conf_hcmd {
} __packed; } __packed;
/** /**
* struct iwl_fw_dbg_trigger - a TLV that describes a debug configuration * enum iwl_fw_dbg_trigger_mode - triggers functionalities
* *
* @enabled: is this trigger enabled * @IWL_FW_DBG_TRIGGER_START: when trigger occurs re-conf the dbg mechanism
* @reserved: * @IWL_FW_DBG_TRIGGER_STOP: when trigger occurs pull the dbg data
* @len: length, in bytes, of the %trigger field
* @trigger: pointer to a trigger struct
*/ */
struct iwl_fw_dbg_trigger { enum iwl_fw_dbg_trigger_mode {
u8 enabled; IWL_FW_DBG_TRIGGER_START = BIT(0),
u8 reserved; IWL_FW_DBG_TRIGGER_STOP = BIT(1),
u8 len;
u8 trigger[0];
} __packed;
/**
* enum iwl_fw_dbg_conf - configurations available
*
* @FW_DBG_CUSTOM: take this configuration from alive
* Note that the trigger is NO-OP for this configuration
*/
enum iwl_fw_dbg_conf {
FW_DBG_CUSTOM = 0,
/* must be last */
FW_DBG_MAX,
FW_DBG_INVALID = 0xff,
}; };
/** /**
* struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration * enum iwl_fw_dbg_trigger_vif_type - define the VIF type for a trigger
* * @IWL_FW_DBG_CONF_VIF_ANY: any vif type
* @id: %enum iwl_fw_dbg_conf * @IWL_FW_DBG_CONF_VIF_IBSS: IBSS mode
* @IWL_FW_DBG_CONF_VIF_STATION: BSS mode
* @IWL_FW_DBG_CONF_VIF_AP: AP mode
* @IWL_FW_DBG_CONF_VIF_P2P_CLIENT: P2P Client mode
* @IWL_FW_DBG_CONF_VIF_P2P_GO: P2P GO mode
* @IWL_FW_DBG_CONF_VIF_P2P_DEVICE: P2P device
*/
enum iwl_fw_dbg_trigger_vif_type {
IWL_FW_DBG_CONF_VIF_ANY = NL80211_IFTYPE_UNSPECIFIED,
IWL_FW_DBG_CONF_VIF_IBSS = NL80211_IFTYPE_ADHOC,
IWL_FW_DBG_CONF_VIF_STATION = NL80211_IFTYPE_STATION,
IWL_FW_DBG_CONF_VIF_AP = NL80211_IFTYPE_AP,
IWL_FW_DBG_CONF_VIF_P2P_CLIENT = NL80211_IFTYPE_P2P_CLIENT,
IWL_FW_DBG_CONF_VIF_P2P_GO = NL80211_IFTYPE_P2P_GO,
IWL_FW_DBG_CONF_VIF_P2P_DEVICE = NL80211_IFTYPE_P2P_DEVICE,
};
/**
* struct iwl_fw_dbg_trigger_tlv - a TLV that describes the trigger
* @id: %enum iwl_fw_dbg_trigger
* @vif_type: %enum iwl_fw_dbg_trigger_vif_type
* @stop_conf_ids: bitmap of configurations this trigger relates to.
* if the mode is %IWL_FW_DBG_TRIGGER_STOP, then if the bit corresponding
* to the currently running configuration is set, the data should be
* collected.
* @stop_delay: how many milliseconds to wait before collecting the data
* after the STOP trigger fires.
* @mode: %enum iwl_fw_dbg_trigger_mode - can be stop / start of both
* @start_conf_id: if mode is %IWL_FW_DBG_TRIGGER_START, this defines what
* configuration should be applied when the triggers kicks in.
* @occurrences: number of occurrences. 0 means the trigger will never fire.
*/
struct iwl_fw_dbg_trigger_tlv {
__le32 id;
__le32 vif_type;
__le32 stop_conf_ids;
__le32 stop_delay;
u8 mode;
u8 start_conf_id;
__le16 occurrences;
__le32 reserved[2];
u8 data[0];
} __packed;
#define FW_DBG_START_FROM_ALIVE 0
#define FW_DBG_CONF_MAX 32
#define FW_DBG_INVALID 0xff
/**
* struct iwl_fw_dbg_trigger_missed_bcon - configures trigger for missed beacons
* @stop_consec_missed_bcon: stop recording if threshold is crossed.
* @stop_consec_missed_bcon_since_rx: stop recording if threshold is crossed.
* @start_consec_missed_bcon: start recording if threshold is crossed.
* @start_consec_missed_bcon_since_rx: start recording if threshold is crossed.
* @reserved1: reserved
* @reserved2: reserved
*/
struct iwl_fw_dbg_trigger_missed_bcon {
__le32 stop_consec_missed_bcon;
__le32 stop_consec_missed_bcon_since_rx;
__le32 reserved2[2];
__le32 start_consec_missed_bcon;
__le32 start_consec_missed_bcon_since_rx;
__le32 reserved1[2];
} __packed;
/**
* struct iwl_fw_dbg_trigger_cmd - configures trigger for messages from FW.
* cmds: the list of commands to trigger the collection on
*/
struct iwl_fw_dbg_trigger_cmd {
struct cmd {
u8 cmd_id;
u8 group_id;
} __packed cmds[16];
} __packed;
/**
* struct iwl_fw_dbg_trigger_stats - configures trigger for statistics
* @stop_offset: the offset of the value to be monitored
* @stop_threshold: the threshold above which to collect
* @start_offset: the offset of the value to be monitored
* @start_threshold: the threshold above which to start recording
*/
struct iwl_fw_dbg_trigger_stats {
__le32 stop_offset;
__le32 stop_threshold;
__le32 start_offset;
__le32 start_threshold;
} __packed;
/**
* struct iwl_fw_dbg_trigger_low_rssi - trigger for low beacon RSSI
* @rssi: RSSI value to trigger at
*/
struct iwl_fw_dbg_trigger_low_rssi {
__le32 rssi;
} __packed;
/**
* struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration.
* @id: conf id
* @usniffer: should the uSniffer image be used * @usniffer: should the uSniffer image be used
* @num_of_hcmds: how many HCMDs to send are present here * @num_of_hcmds: how many HCMDs to send are present here
* @hcmd: a variable length host command to be sent to apply the configuration. * @hcmd: a variable length host command to be sent to apply the configuration.
* If there is more than one HCMD to send, they will appear one after the * If there is more than one HCMD to send, they will appear one after the
* other and be sent in the order that they appear in. * other and be sent in the order that they appear in.
* This parses IWL_UCODE_TLV_FW_DBG_CONF * This parses IWL_UCODE_TLV_FW_DBG_CONF. The user can add up to
* %FW_DBG_CONF_MAX configurations per run.
*/ */
struct iwl_fw_dbg_conf_tlv { struct iwl_fw_dbg_conf_tlv {
u8 id; u8 id;
@ -495,8 +590,6 @@ struct iwl_fw_dbg_conf_tlv {
u8 reserved; u8 reserved;
u8 num_of_hcmds; u8 num_of_hcmds;
struct iwl_fw_dbg_conf_hcmd hcmd; struct iwl_fw_dbg_conf_hcmd hcmd;
/* struct iwl_fw_dbg_trigger sits after all variable length hcmds */
} __packed; } __packed;
#endif /* __iwl_fw_file_h__ */ #endif /* __iwl_fw_file_h__ */

View file

@ -68,6 +68,7 @@
#include <net/mac80211.h> #include <net/mac80211.h>
#include "iwl-fw-file.h" #include "iwl-fw-file.h"
#include "iwl-fw-error-dump.h"
/** /**
* enum iwl_ucode_type * enum iwl_ucode_type
@ -157,6 +158,8 @@ struct iwl_fw_cscheme_list {
* @dbg_dest_tlv: points to the destination TLV for debug * @dbg_dest_tlv: points to the destination TLV for debug
* @dbg_conf_tlv: array of pointers to configuration TLVs for debug * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
* @dbg_conf_tlv_len: lengths of the @dbg_conf_tlv entries * @dbg_conf_tlv_len: lengths of the @dbg_conf_tlv entries
* @dbg_trigger_tlv: array of pointers to triggers TLVs
* @dbg_trigger_tlv_len: lengths of the @dbg_trigger_tlv entries
* @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
*/ */
struct iwl_fw { struct iwl_fw {
@ -186,9 +189,10 @@ struct iwl_fw {
u32 sdio_adma_addr; u32 sdio_adma_addr;
struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv; struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX]; struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
size_t dbg_conf_tlv_len[FW_DBG_MAX]; size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
u8 dbg_dest_reg_num; u8 dbg_dest_reg_num;
}; };
@ -206,37 +210,6 @@ static inline const char *get_fw_dbg_mode_string(int mode)
} }
} }
static inline const struct iwl_fw_dbg_trigger *
iwl_fw_dbg_conf_get_trigger(const struct iwl_fw *fw, u8 id)
{
const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg_conf_tlv[id];
u8 *ptr;
int i;
if (!conf_tlv)
return NULL;
ptr = (void *)&conf_tlv->hcmd;
for (i = 0; i < conf_tlv->num_of_hcmds; i++) {
ptr += sizeof(conf_tlv->hcmd);
ptr += le16_to_cpu(conf_tlv->hcmd.len);
}
return (const struct iwl_fw_dbg_trigger *)ptr;
}
static inline bool
iwl_fw_dbg_conf_enabled(const struct iwl_fw *fw, u8 id)
{
const struct iwl_fw_dbg_trigger *trigger =
iwl_fw_dbg_conf_get_trigger(fw, id);
if (!trigger)
return false;
return trigger->enabled;
}
static inline bool static inline bool
iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id) iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id)
{ {
@ -248,4 +221,18 @@ iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id)
return conf_tlv->usniffer; return conf_tlv->usniffer;
} }
#define iwl_fw_dbg_trigger_enabled(fw, id) ({ \
void *__dbg_trigger = (fw)->dbg_trigger_tlv[(id)]; \
unlikely(__dbg_trigger); \
})
static inline struct iwl_fw_dbg_trigger_tlv*
iwl_fw_dbg_get_trigger(const struct iwl_fw *fw, u8 id)
{
if (WARN_ON(id >= ARRAY_SIZE(fw->dbg_trigger_tlv)))
return NULL;
return fw->dbg_trigger_tlv[id];
}
#endif /* __iwl_fw_h__ */ #endif /* __iwl_fw_h__ */

View file

@ -72,7 +72,7 @@
#include "iwl-trans.h" #include "iwl-trans.h"
#define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */ #define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */
#define IWL_NUM_PAPD_CH_GROUPS 7 #define IWL_NUM_PAPD_CH_GROUPS 9
#define IWL_NUM_TXP_CH_GROUPS 9 #define IWL_NUM_TXP_CH_GROUPS 9
struct iwl_phy_db_entry { struct iwl_phy_db_entry {

View file

@ -370,7 +370,6 @@ enum secure_load_status_reg {
#define MON_BUFF_CYCLE_CNT (0xa03c48) #define MON_BUFF_CYCLE_CNT (0xa03c48)
#define DBGC_IN_SAMPLE (0xa03c00) #define DBGC_IN_SAMPLE (0xa03c00)
#define DBGC_OUT_CTRL (0xa03c0c)
/* FW chicken bits */ /* FW chicken bits */
#define LMPM_CHICK 0xA01FF8 #define LMPM_CHICK 0xA01FF8

View file

@ -595,6 +595,7 @@ enum iwl_d0i3_mode {
* @dflt_pwr_limit: default power limit fetched from the platform (ACPI) * @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
* @dbg_dest_tlv: points to the destination TLV for debug * @dbg_dest_tlv: points to the destination TLV for debug
* @dbg_conf_tlv: array of pointers to configuration TLVs for debug * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
* @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
* @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
*/ */
struct iwl_trans { struct iwl_trans {
@ -628,7 +629,8 @@ struct iwl_trans {
u64 dflt_pwr_limit; u64 dflt_pwr_limit;
const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv; const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX]; const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
u8 dbg_dest_reg_num; u8 dbg_dest_reg_num;
enum iwl_d0i3_mode d0i3_mode; enum iwl_d0i3_mode d0i3_mode;

View file

@ -611,7 +611,7 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
bt_cmd->enabled_modules |= bt_cmd->enabled_modules |=
cpu_to_le32(BT_COEX_SYNC2SCO_ENABLED); cpu_to_le32(BT_COEX_SYNC2SCO_ENABLED);
if (IWL_MVM_BT_COEX_CORUNNING) if (iwl_mvm_bt_is_plcr_supported(mvm))
bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED); bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED);
if (IWL_MVM_BT_COEX_MPLUT) { if (IWL_MVM_BT_COEX_MPLUT) {
@ -1234,7 +1234,7 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb, dev_cmd); return iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb, dev_cmd);
if (!IWL_MVM_BT_COEX_CORUNNING) if (!iwl_mvm_bt_is_plcr_supported(mvm))
return 0; return 0;
lockdep_assert_held(&mvm->mutex); lockdep_assert_held(&mvm->mutex);

View file

@ -619,7 +619,7 @@ int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm)
if (IWL_MVM_BT_COEX_SYNC2SCO) if (IWL_MVM_BT_COEX_SYNC2SCO)
bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO); bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
if (IWL_MVM_BT_COEX_CORUNNING) { if (iwl_mvm_bt_is_plcr_supported(mvm)) {
bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 | bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
BT_VALID_CORUN_LUT_40); BT_VALID_CORUN_LUT_40);
bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING); bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
@ -1167,16 +1167,10 @@ bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
return lut_type != BT_COEX_LOOSE_LUT; return lut_type != BT_COEX_LOOSE_LUT;
} }
bool iwl_mvm_bt_coex_is_ant_avail_old(struct iwl_mvm *mvm, u8 ant)
{
u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
return ag < BT_HIGH_TRAFFIC;
}
bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm) bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm)
{ {
u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading); u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
return ag == BT_OFF; return ag < BT_HIGH_TRAFFIC;
} }
bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm, bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
@ -1213,7 +1207,7 @@ int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
.dataflags = { IWL_HCMD_DFL_NOCOPY, }, .dataflags = { IWL_HCMD_DFL_NOCOPY, },
}; };
if (!IWL_MVM_BT_COEX_CORUNNING) if (!iwl_mvm_bt_is_plcr_supported(mvm))
return 0; return 0;
lockdep_assert_held(&mvm->mutex); lockdep_assert_held(&mvm->mutex);

View file

@ -1876,25 +1876,28 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
if (mvm->net_detect) { if (mvm->net_detect) {
iwl_mvm_query_netdetect_reasons(mvm, vif); iwl_mvm_query_netdetect_reasons(mvm, vif);
/* has unlocked the mutex, so skip that */
goto out;
} else { } else {
keep = iwl_mvm_query_wakeup_reasons(mvm, vif); keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
#ifdef CONFIG_IWLWIFI_DEBUGFS #ifdef CONFIG_IWLWIFI_DEBUGFS
if (keep) if (keep)
mvm->keep_vif = vif; mvm->keep_vif = vif;
/* has unlocked the mutex, so skip that */
goto out_iterate;
#endif #endif
} }
/* has unlocked the mutex, so skip that */
goto out;
out_unlock: out_unlock:
mutex_unlock(&mvm->mutex); mutex_unlock(&mvm->mutex);
out: out_iterate:
if (!test) if (!test)
ieee80211_iterate_active_interfaces_rtnl(mvm->hw, ieee80211_iterate_active_interfaces_rtnl(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_d3_disconnect_iter, keep ? vif : NULL); iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
out:
/* return 1 to reconfigure the device */ /* return 1 to reconfigure the device */
set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status); set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);

View file

@ -545,6 +545,57 @@ static ssize_t iwl_dbgfs_uapsd_misbehaving_write(struct ieee80211_vif *vif,
return ret ? count : -EINVAL; return ret ? count : -EINVAL;
} }
static ssize_t iwl_dbgfs_rx_phyinfo_write(struct ieee80211_vif *vif, char *buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
struct ieee80211_chanctx_conf *chanctx_conf;
struct iwl_mvm_phy_ctxt *phy_ctxt;
u16 value;
int ret;
ret = kstrtou16(buf, 0, &value);
if (ret)
return ret;
mutex_lock(&mvm->mutex);
rcu_read_lock();
chanctx_conf = rcu_dereference(vif->chanctx_conf);
/* make sure the channel context is assigned */
if (!chanctx_conf) {
rcu_read_unlock();
mutex_unlock(&mvm->mutex);
return -EINVAL;
}
phy_ctxt = &mvm->phy_ctxts[*(u16 *)chanctx_conf->drv_priv];
rcu_read_unlock();
mvm->dbgfs_rx_phyinfo = value;
ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chanctx_conf->min_def,
chanctx_conf->rx_chains_static,
chanctx_conf->rx_chains_dynamic);
mutex_unlock(&mvm->mutex);
return ret ?: count;
}
static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_vif *vif = file->private_data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
char buf[8];
snprintf(buf, sizeof(buf), "0x%04x\n", mvmvif->mvm->dbgfs_rx_phyinfo);
return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf));
}
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif) _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@ -560,6 +611,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256); MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10); MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20); MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10);
void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{ {
@ -595,6 +647,8 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
S_IRUSR | S_IWUSR); S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE_VIF(uapsd_misbehaving, mvmvif->dbgfs_dir, MVM_DEBUGFS_ADD_FILE_VIF(uapsd_misbehaving, mvmvif->dbgfs_dir,
S_IRUSR | S_IWUSR); S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir,
S_IRUSR | S_IWUSR);
if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p && if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
mvmvif == mvm->bf_allowed_vif) mvmvif == mvm->bf_allowed_vif)

View file

@ -942,7 +942,7 @@ static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file,
size_t count, loff_t *ppos) size_t count, loff_t *ppos)
{ {
struct iwl_mvm *mvm = file->private_data; struct iwl_mvm *mvm = file->private_data;
enum iwl_fw_dbg_conf conf; int conf;
char buf[8]; char buf[8];
const size_t bufsz = sizeof(buf); const size_t bufsz = sizeof(buf);
int pos = 0; int pos = 0;
@ -966,7 +966,7 @@ static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm,
if (ret) if (ret)
return ret; return ret;
if (WARN_ON(conf_id >= FW_DBG_MAX)) if (WARN_ON(conf_id >= FW_DBG_CONF_MAX))
return -EINVAL; return -EINVAL;
mutex_lock(&mvm->mutex); mutex_lock(&mvm->mutex);
@ -985,7 +985,7 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
if (ret) if (ret)
return ret; return ret;
iwl_mvm_fw_dbg_collect(mvm); iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, 0);
iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE); iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
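Putting the pieces together, the pattern these TLVs enable looks roughly like the fragment below: check whether the firmware file carried a given trigger, fetch it, and collect the same way the FW_DBG_TRIGGER_USER path above does. This is an illustrative fragment, not code from this series; example_missed_beacons_dump() is hypothetical, and the trailing arguments of iwl_mvm_fw_dbg_collect() are simply carried over from the call above.

/* Illustrative fragment only (not from this patch set): consuming one
 * of the new firmware debug triggers.  iwl_fw_dbg_trigger_enabled() and
 * iwl_fw_dbg_get_trigger() come from the iwl-fw.h hunk earlier in this
 * diff; the function itself is hypothetical.
 */
static void example_missed_beacons_dump(struct iwl_mvm *mvm, u32 missed)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_missed_bcon *bcon;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MISSED_BEACONS))
		return;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MISSED_BEACONS);
	bcon = (void *)trig->data;	/* trigger-specific payload */

	if (missed < le32_to_cpu(bcon->start_consec_missed_bcon))
		return;

	/* trailing arguments mirror the FW_DBG_TRIGGER_USER call above */
	iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_MISSED_BEACONS, NULL, 0, 0);
}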

View file

@ -70,6 +70,7 @@
#define MAC_INDEX_AUX 4 #define MAC_INDEX_AUX 4
#define MAC_INDEX_MIN_DRIVER 0 #define MAC_INDEX_MIN_DRIVER 0
#define NUM_MAC_INDEX_DRIVER MAC_INDEX_AUX #define NUM_MAC_INDEX_DRIVER MAC_INDEX_AUX
#define NUM_MAC_INDEX (MAC_INDEX_AUX + 1)
enum iwl_ac { enum iwl_ac {
AC_BK, AC_BK,

View file

@@ -70,54 +70,9 @@
/* Scan Commands, Responses, Notifications */
/* Masks for iwl_scan_channel.type flags */
#define SCAN_CHANNEL_TYPE_ACTIVE BIT(0)
#define SCAN_CHANNEL_NARROW_BAND BIT(22)
/* Max number of IEs for direct SSID scans in a command */
#define PROBE_OPTION_MAX 20
/**
* struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table
* @channel: band is selected by iwl_scan_cmd "flags" field
* @tx_gain: gain for analog radio
* @dsp_atten: gain for DSP
* @active_dwell: dwell time for active scan in TU, typically 5-50
* @passive_dwell: dwell time for passive scan in TU, typically 20-500
* @type: type is broken down to these bits:
* bit 0: 0 = passive, 1 = active
* bits 1-20: SSID direct bit map. If any of these bits is set then
* the corresponding SSID IE is transmitted in probe request
* (bit i adds IE in position i to the probe request)
* bit 22: channel width, 0 = regular, 1 = TGj narrow channel
*
* @iteration_count:
* @iteration_interval:
* This struct is used once for each channel in the scan list.
* Each channel can independently select:
* 1) SSID for directed active scans
* 2) Txpower setting (for rate specified within Tx command)
* 3) How long to stay on-channel (behavior may be modified by quiet_time,
* quiet_plcp_th, good_CRC_th)
*
* To avoid uCode errors, make sure the following are true (see comments
* under struct iwl_scan_cmd about max_out_time and quiet_time):
* 1) If using passive_dwell (i.e. passive_dwell != 0):
* active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
* 2) quiet_time <= active_dwell
* 3) If restricting off-channel time (i.e. max_out_time !=0):
* passive_dwell < max_out_time
* active_dwell < max_out_time
*/
struct iwl_scan_channel {
__le32 type;
__le16 channel;
__le16 iteration_count;
__le32 iteration_interval;
__le16 active_dwell;
__le16 passive_dwell;
} __packed; /* SCAN_CHANNEL_CONTROL_API_S_VER_1 */
/**
* struct iwl_ssid_ie - directed scan network information element
*
@@ -132,152 +87,6 @@ struct iwl_ssid_ie {
u8 ssid[IEEE80211_MAX_SSID_LEN];
} __packed; /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
/**
* iwl_scan_flags - masks for scan command flags
*@SCAN_FLAGS_PERIODIC_SCAN:
*@SCAN_FLAGS_P2P_PUBLIC_ACTION_FRAME_TX:
*@SCAN_FLAGS_DELAYED_SCAN_LOWBAND:
*@SCAN_FLAGS_DELAYED_SCAN_HIGHBAND:
*@SCAN_FLAGS_FRAGMENTED_SCAN:
*@SCAN_FLAGS_PASSIVE2ACTIVE: use active scan on channels that was active
* in the past hour, even if they are marked as passive.
*/
enum iwl_scan_flags {
SCAN_FLAGS_PERIODIC_SCAN = BIT(0),
SCAN_FLAGS_P2P_PUBLIC_ACTION_FRAME_TX = BIT(1),
SCAN_FLAGS_DELAYED_SCAN_LOWBAND = BIT(2),
SCAN_FLAGS_DELAYED_SCAN_HIGHBAND = BIT(3),
SCAN_FLAGS_FRAGMENTED_SCAN = BIT(4),
SCAN_FLAGS_PASSIVE2ACTIVE = BIT(5),
};
/**
* enum iwl_scan_type - Scan types for scan command
* @SCAN_TYPE_FORCED:
* @SCAN_TYPE_BACKGROUND:
* @SCAN_TYPE_OS:
* @SCAN_TYPE_ROAMING:
* @SCAN_TYPE_ACTION:
* @SCAN_TYPE_DISCOVERY:
* @SCAN_TYPE_DISCOVERY_FORCED:
*/
enum iwl_scan_type {
SCAN_TYPE_FORCED = 0,
SCAN_TYPE_BACKGROUND = 1,
SCAN_TYPE_OS = 2,
SCAN_TYPE_ROAMING = 3,
SCAN_TYPE_ACTION = 4,
SCAN_TYPE_DISCOVERY = 5,
SCAN_TYPE_DISCOVERY_FORCED = 6,
}; /* SCAN_ACTIVITY_TYPE_E_VER_1 */
/**
* struct iwl_scan_cmd - scan request command
* ( SCAN_REQUEST_CMD = 0x80 )
* @len: command length in bytes
* @scan_flags: scan flags from SCAN_FLAGS_*
* @channel_count: num of channels in channel list
* (1 - ucode_capa.n_scan_channels)
* @quiet_time: in msecs, dwell this time for active scan on quiet channels
* @quiet_plcp_th: quiet PLCP threshold (channel is quiet if less than
* this number of packets were received (typically 1)
* @passive2active: is auto switching from passive to active during scan allowed
* @rxchain_sel_flags: RXON_RX_CHAIN_*
* @max_out_time: in TUs, max out of serving channel time
* @suspend_time: how long to pause scan when returning to service channel:
* bits 0-19: beacon interal in TUs (suspend before executing)
* bits 20-23: reserved
* bits 24-31: number of beacons (suspend between channels)
* @rxon_flags: RXON_FLG_*
* @filter_flags: RXON_FILTER_*
* @tx_cmd: for active scans (zero for passive), w/o payload,
* no RS so specify TX rate
* @direct_scan: direct scan SSIDs
* @type: one of SCAN_TYPE_*
* @repeats: how many time to repeat the scan
*/
struct iwl_scan_cmd {
__le16 len;
u8 scan_flags;
u8 channel_count;
__le16 quiet_time;
__le16 quiet_plcp_th;
__le16 passive2active;
__le16 rxchain_sel_flags;
__le32 max_out_time;
__le32 suspend_time;
/* RX_ON_FLAGS_API_S_VER_1 */
__le32 rxon_flags;
__le32 filter_flags;
struct iwl_tx_cmd tx_cmd;
struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
__le32 type;
__le32 repeats;
/*
* Probe request frame, followed by channel list.
*
* Size of probe request frame is specified by byte count in tx_cmd.
* Channel list follows immediately after probe request frame.
* Number of channels in list is specified by channel_count.
* Each channel in list is of type:
*
* struct iwl_scan_channel channels[0];
*
* NOTE: Only one band of channels can be scanned per pass. You
* must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
* for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
* before requesting another scan.
*/
u8 data[0];
} __packed; /* SCAN_REQUEST_FIXED_PART_API_S_VER_5 */
/* Response to scan request contains only status with one of these values */
#define SCAN_RESPONSE_OK 0x1
#define SCAN_RESPONSE_ERROR 0x2
/*
* SCAN_ABORT_CMD = 0x81
* When scan abort is requested, the command has no fields except the common
* header. The response contains only a status with one of these values.
*/
#define SCAN_ABORT_POSSIBLE 0x1
#define SCAN_ABORT_IGNORED 0x2 /* no pending scans */
/* TODO: complete documentation */
#define SCAN_OWNER_STATUS 0x1
#define MEASURE_OWNER_STATUS 0x2
/**
* struct iwl_scan_start_notif - notifies start of scan in the device
* ( SCAN_START_NOTIFICATION = 0x82 )
* @tsf_low: TSF timer (lower half) in usecs
* @tsf_high: TSF timer (higher half) in usecs
* @beacon_timer: structured as follows:
* bits 0:19 - beacon interval in usecs
* bits 20:23 - reserved (0)
* bits 24:31 - number of beacons
* @channel: which channel is scanned
* @band: 0 for 5.2 GHz, 1 for 2.4 GHz
* @status: one of *_OWNER_STATUS
*/
struct iwl_scan_start_notif {
__le32 tsf_low;
__le32 tsf_high;
__le32 beacon_timer;
u8 channel;
u8 band;
u8 reserved[2];
__le32 status;
} __packed; /* SCAN_START_NTF_API_S_VER_1 */
/* scan results probe_status first bit indicates success */
#define SCAN_PROBE_STATUS_OK 0
#define SCAN_PROBE_STATUS_TX_FAILED BIT(0)
/* error statuses combined with TX_FAILED */
#define SCAN_PROBE_STATUS_FAIL_TTL BIT(1)
#define SCAN_PROBE_STATUS_FAIL_BT BIT(2)
/* How many statistics are gathered for each channel */
#define SCAN_RESULTS_STATISTICS 1


@@ -65,6 +65,7 @@
#ifndef __fw_api_stats_h__
#define __fw_api_stats_h__
#include "fw-api-mac.h"
struct mvm_statistics_dbg {
__le32 burst_check;
@@ -218,7 +219,7 @@ struct mvm_statistics_bt_activity {
__le32 lo_priority_rx_denied_cnt;
} __packed; /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */
struct mvm_statistics_general {
struct mvm_statistics_general_v5 {
__le32 radio_temperature;
__le32 radio_voltage;
struct mvm_statistics_dbg dbg;
@@ -244,6 +245,39 @@ struct mvm_statistics_general {
struct mvm_statistics_bt_activity bt_activity;
} __packed; /* STATISTICS_GENERAL_API_S_VER_5 */
struct mvm_statistics_general_v8 {
__le32 radio_temperature;
__le32 radio_voltage;
struct mvm_statistics_dbg dbg;
__le32 sleep_time;
__le32 slots_out;
__le32 slots_idle;
__le32 ttl_timestamp;
struct mvm_statistics_div slow_div;
__le32 rx_enable_counter;
/*
* num_of_sos_states:
* count the number of times we have to re-tune
* in order to get out of bad PHY status
*/
__le32 num_of_sos_states;
__le32 beacon_filtered;
__le32 missed_beacons;
__s8 beacon_filter_average_energy;
__s8 beacon_filter_reason;
__s8 beacon_filter_current_energy;
__s8 beacon_filter_reserved;
__le32 beacon_filter_delta_time;
struct mvm_statistics_bt_activity bt_activity;
__le64 rx_time;
__le64 on_time_rf;
__le64 on_time_scan;
__le64 tx_time;
__le32 beacon_counter[NUM_MAC_INDEX];
u8 beacon_average_energy[NUM_MAC_INDEX];
u8 reserved[4 - (NUM_MAC_INDEX % 4)];
} __packed; /* STATISTICS_GENERAL_API_S_VER_8 */
struct mvm_statistics_rx {
struct mvm_statistics_rx_phy ofdm;
struct mvm_statistics_rx_phy cck;
@@ -256,22 +290,28 @@ struct mvm_statistics_rx {
*
* By default, uCode issues this notification after receiving a beacon
* while associated. To disable this behavior, set DISABLE_NOTIF flag in the
* REPLY_STATISTICS_CMD 0x9c, above.
* STATISTICS_CMD (0x9c), below.
*
* Statistics counters continue to increment beacon after beacon, but are
* cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
* 0x9c with CLEAR_STATS bit set (see above).
*
* uCode also issues this notification during scans. uCode clears statistics
* appropriately so that each notification contains statistics for only the
* one channel that has just been scanned.
*/
struct iwl_notif_statistics {
struct iwl_notif_statistics_v8 {
__le32 flag;
struct mvm_statistics_rx rx;
struct mvm_statistics_tx tx;
struct mvm_statistics_general general;
struct mvm_statistics_general_v5 general;
} __packed; /* STATISTICS_NTFY_API_S_VER_8 */
struct iwl_notif_statistics_v10 {
__le32 flag;
struct mvm_statistics_rx rx;
struct mvm_statistics_tx tx;
struct mvm_statistics_general_v8 general;
} __packed; /* STATISTICS_NTFY_API_S_VER_10 */
#define IWL_STATISTICS_FLG_CLEAR 0x1
#define IWL_STATISTICS_FLG_DISABLE_NOTIF 0x2
struct iwl_statistics_cmd {
__le32 flags;
} __packed; /* STATISTICS_CMD_API_S_VER_1 */
#endif /* __fw_api_stats_h__ */
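With both notification layouts in one header, the RX path has to know which one a given STATISTICS_NOTIFICATION carries. Below is a sketch of one way to tell them apart, by payload length, in the spirit of the ALIVE version handling further down; the shipping driver may instead key off a firmware API/capability bit, so treat the dispatch condition as an assumption. The struct names come from this header, and iwl_rx_packet_payload_len()/IWL_ERR() are existing iwlwifi helpers:

/*
 * Illustrative sketch only: pick the notification layout by payload size.
 * The real driver may gate this on a firmware API/capability flag instead;
 * the length check mirrors the ALIVE version handling shown later.
 */
static void example_handle_statistics(struct iwl_mvm *mvm,
				      struct iwl_rx_packet *pkt)
{
	u32 len = iwl_rx_packet_payload_len(pkt);

	if (len == sizeof(struct iwl_notif_statistics_v10)) {
		struct iwl_notif_statistics_v10 *stats = (void *)pkt->data;

		/* the v8 general block grew rx/tx/on-air time counters */
		mvm->radio_stats.rx_time = le64_to_cpu(stats->general.rx_time);
		mvm->radio_stats.tx_time = le64_to_cpu(stats->general.tx_time);
	} else if (len == sizeof(struct iwl_notif_statistics_v8)) {
		struct iwl_notif_statistics_v8 *stats = (void *)pkt->data;

		/* older firmware: only the STATISTICS_GENERAL v5 block */
		(void)stats;
	} else {
		IWL_ERR(mvm, "unexpected statistics notif size %u\n", len);
	}
}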


@@ -192,6 +192,7 @@ enum {
BEACON_NOTIFICATION = 0x90,
BEACON_TEMPLATE_CMD = 0x91,
TX_ANT_CONFIGURATION_CMD = 0x98,
STATISTICS_CMD = 0x9c,
STATISTICS_NOTIFICATION = 0x9d,
EOSP_NOTIFICATION = 0x9e,
REDUCE_TX_POWER_CMD = 0x9f,
@@ -431,7 +432,7 @@ enum {
#define IWL_ALIVE_FLG_RFKILL BIT(0)
struct mvm_alive_resp {
struct mvm_alive_resp_ver1 {
__le16 status;
__le16 flags;
u8 ucode_minor;
@@ -482,6 +483,30 @@ struct mvm_alive_resp_ver2 {
__le32 dbg_print_buff_addr;
} __packed; /* ALIVE_RES_API_S_VER_2 */
struct mvm_alive_resp {
__le16 status;
__le16 flags;
__le32 ucode_minor;
__le32 ucode_major;
u8 ver_subtype;
u8 ver_type;
u8 mac;
u8 opt;
__le32 timestamp;
__le32 error_event_table_ptr; /* SRAM address for error log */
__le32 log_event_table_ptr; /* SRAM address for LMAC event log */
__le32 cpu_register_ptr;
__le32 dbgm_config_ptr;
__le32 alive_counter_ptr;
__le32 scd_base_ptr; /* SRAM address for SCD */
__le32 st_fwrd_addr; /* pointer to Store and forward */
__le32 st_fwrd_size;
__le32 umac_minor; /* UMAC version: minor */
__le32 umac_major; /* UMAC version: major */
__le32 error_info_addr; /* SRAM address for UMAC error log */
__le32 dbg_print_buff_addr;
} __packed; /* ALIVE_RES_API_S_VER_3 */
/* Error response/notification */
enum {
FW_ERR_UNKNOWN_CMD = 0x0,


@@ -112,25 +112,27 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
struct iwl_mvm *mvm =
container_of(notif_wait, struct iwl_mvm, notif_wait);
struct iwl_mvm_alive_data *alive_data = data;
struct mvm_alive_resp *palive;
struct mvm_alive_resp_ver1 *palive1;
struct mvm_alive_resp_ver2 *palive2;
struct mvm_alive_resp *palive;
if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
palive = (void *)pkt->data;
if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
palive1 = (void *)pkt->data;
mvm->support_umac_log = false;
mvm->error_event_table =
le32_to_cpu(palive->error_event_table_ptr);
mvm->log_event_table = le32_to_cpu(palive->log_event_table_ptr);
alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
alive_data->valid = le16_to_cpu(palive->status) ==
le32_to_cpu(palive1->error_event_table_ptr);
mvm->log_event_table =
le32_to_cpu(palive1->log_event_table_ptr);
alive_data->scd_base_addr = le32_to_cpu(palive1->scd_base_ptr);
alive_data->valid = le16_to_cpu(palive1->status) ==
IWL_ALIVE_STATUS_OK;
IWL_DEBUG_FW(mvm,
"Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
le16_to_cpu(palive->status), palive->ver_type,
palive->ver_subtype, palive->flags);
} else {
le16_to_cpu(palive1->status), palive1->ver_type,
palive1->ver_subtype, palive1->flags);
} else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
palive2 = (void *)pkt->data;
mvm->error_event_table =
@@ -156,6 +158,33 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
IWL_DEBUG_FW(mvm,
"UMAC version: Major - 0x%x, Minor - 0x%x\n",
palive2->umac_major, palive2->umac_minor);
} else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
palive = (void *)pkt->data;
mvm->error_event_table =
le32_to_cpu(palive->error_event_table_ptr);
mvm->log_event_table =
le32_to_cpu(palive->log_event_table_ptr);
alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
mvm->umac_error_event_table =
le32_to_cpu(palive->error_info_addr);
mvm->sf_space.addr = le32_to_cpu(palive->st_fwrd_addr);
mvm->sf_space.size = le32_to_cpu(palive->st_fwrd_size);
alive_data->valid = le16_to_cpu(palive->status) ==
IWL_ALIVE_STATUS_OK;
if (mvm->umac_error_event_table)
mvm->support_umac_log = true;
IWL_DEBUG_FW(mvm,
"Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
le16_to_cpu(palive->status), palive->ver_type,
palive->ver_subtype, palive->flags);
IWL_DEBUG_FW(mvm,
"UMAC version: Major - 0x%x, Minor - 0x%x\n",
le32_to_cpu(palive->umac_major),
le32_to_cpu(palive->umac_minor));
}
return true;
@@ -188,8 +217,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
struct iwl_sf_region st_fwrd_space;
if (ucode_type == IWL_UCODE_REGULAR &&
iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_CUSTOM) &&
iwl_fw_dbg_conf_enabled(mvm->fw, FW_DBG_CUSTOM))
iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE))
fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER);
else
fw = iwl_get_ucode_image(mvm, ucode_type);
@@ -451,20 +479,80 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
iwl_free_resp(&cmd);
}
void iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm)
int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
struct iwl_mvm_dump_desc *desc,
unsigned int delay)
{
if (test_and_set_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status))
return -EBUSY;
if (WARN_ON(mvm->fw_dump_desc))
iwl_mvm_free_fw_dump_desc(mvm);
IWL_WARN(mvm, "Collecting data: trigger %d fired.\n",
le32_to_cpu(desc->trig_desc.type));
mvm->fw_dump_desc = desc;
/* stop recording */
if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
} else {
iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0);
iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, 0);
/* wait before we collect the data till the DBGC stop */
udelay(100);
}
schedule_work(&mvm->fw_error_dump_wk);
queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay);
return 0;
}
int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, enum iwl_fw_dbg_conf conf_id)
int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
const char *str, size_t len, unsigned int delay)
{
struct iwl_mvm_dump_desc *desc;
desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
if (!desc)
return -ENOMEM;
desc->len = len;
desc->trig_desc.type = cpu_to_le32(trig);
memcpy(desc->trig_desc.data, str, len);
return iwl_mvm_fw_dbg_collect_desc(mvm, desc, delay);
}
int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_tlv *trigger,
const char *str, size_t len)
{
unsigned int delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
u16 occurrences = le16_to_cpu(trigger->occurrences);
int ret;
if (!occurrences)
return 0;
ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), str,
len, delay);
if (ret)
return ret;
trigger->occurrences = cpu_to_le16(occurrences - 1);
return 0;
}
static inline void iwl_mvm_restart_early_start(struct iwl_mvm *mvm)
{
if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000)
iwl_clear_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
else
iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 1);
}
int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
{
u8 *ptr;
int ret;
@@ -474,6 +562,14 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, enum iwl_fw_dbg_conf conf_id)
"Invalid configuration %d\n", conf_id))
return -EINVAL;
/* EARLY START - firmware's configuration is hard coded */
if ((!mvm->fw->dbg_conf_tlv[conf_id] ||
!mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) &&
conf_id == FW_DBG_START_FROM_ALIVE) {
iwl_mvm_restart_early_start(mvm);
return 0;
}
if (!mvm->fw->dbg_conf_tlv[conf_id])
return -EINVAL;
@@ -583,7 +679,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
mvm->fw_dbg_conf = FW_DBG_INVALID;
iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_CUSTOM);
/* if we have a destination, assume EARLY START */
if (mvm->fw->dbg_dest_tlv)
mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE;
iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE);
ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
if (ret)


@@ -244,6 +244,7 @@ static void iwl_mvm_mac_sta_hw_queues_iter(void *_data,
unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
struct ieee80211_vif *exclude_vif)
{
u8 sta_id;
struct iwl_mvm_hw_queues_iface_iterator_data data = {
.exclude_vif = exclude_vif,
.used_hw_queues =
@@ -264,6 +265,13 @@ unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
iwl_mvm_mac_sta_hw_queues_iter,
&data);
/*
* Some TDLS stations may be removed but are in the process of being
* drained. Don't touch their queues.
*/
for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT)
data.used_hw_queues |= mvm->tfd_drained[sta_id];
return data.used_hw_queues;
}
@@ -1367,10 +1375,18 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
{
struct iwl_missed_beacons_notif *missed_beacons = _data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
struct iwl_fw_dbg_trigger_missed_bcon *bcon_trig;
struct iwl_fw_dbg_trigger_tlv *trigger;
u32 stop_trig_missed_bcon, stop_trig_missed_bcon_since_rx;
u32 rx_missed_bcon, rx_missed_bcon_since_rx;
if (mvmvif->id != (u16)le32_to_cpu(missed_beacons->mac_id))
return;
rx_missed_bcon = le32_to_cpu(missed_beacons->consec_missed_beacons);
rx_missed_bcon_since_rx =
le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx);
/*
* TODO: the threshold should be adjusted based on latency conditions,
* and/or in case of a CS flow on one of the other AP vifs.
@@ -1378,6 +1394,26 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
if (le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx) >
IWL_MVM_MISSED_BEACONS_THRESHOLD)
ieee80211_beacon_loss(vif);
if (!iwl_fw_dbg_trigger_enabled(mvm->fw,
FW_DBG_TRIGGER_MISSED_BEACONS))
return;
trigger = iwl_fw_dbg_get_trigger(mvm->fw,
FW_DBG_TRIGGER_MISSED_BEACONS);
bcon_trig = (void *)trigger->data;
stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon);
stop_trig_missed_bcon_since_rx =
le32_to_cpu(bcon_trig->stop_consec_missed_bcon_since_rx);
/* TODO: implement start trigger */
if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trigger))
return;
if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx ||
rx_missed_bcon >= stop_trig_missed_bcon)
iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL, 0);
}
int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,


@@ -339,13 +339,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
!iwlwifi_mod_params.sw_crypto)
hw->flags |= IEEE80211_HW_MFP_CAPABLE;
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN ||
mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
hw->flags |= IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS;
hw->wiphy->features |=
NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
}
hw->flags |= IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS;
hw->wiphy->features |=
NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
hw->sta_data_size = sizeof(struct iwl_mvm_sta);
hw->vif_data_size = sizeof(struct iwl_mvm_vif);
@@ -889,12 +886,23 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
iwl_trans_release_nic_access(mvm->trans, &flags);
}
void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm)
{
if (mvm->fw_dump_desc == &iwl_mvm_dump_desc_assert ||
!mvm->fw_dump_desc)
return;
kfree(mvm->fw_dump_desc);
mvm->fw_dump_desc = NULL;
}
void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
{
struct iwl_fw_error_dump_file *dump_file;
struct iwl_fw_error_dump_data *dump_data;
struct iwl_fw_error_dump_info *dump_info;
struct iwl_fw_error_dump_mem *dump_mem;
struct iwl_fw_error_dump_trigger_desc *dump_trig;
struct iwl_mvm_dump_ptrs *fw_error_dump;
u32 sram_len, sram_ofs;
u32 file_len, fifo_data_len = 0;
@@ -964,6 +972,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
fifo_data_len +
sizeof(*dump_info);
if (mvm->fw_dump_desc)
file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
mvm->fw_dump_desc->len;
/* Make room for the SMEM, if it exists */
if (smem_len)
file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
@@ -975,6 +987,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
dump_file = vzalloc(file_len);
if (!dump_file) {
kfree(fw_error_dump);
iwl_mvm_free_fw_dump_desc(mvm);
return;
}
@@ -1003,6 +1016,19 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
if (test_bit(STATUS_FW_ERROR, &mvm->trans->status))
iwl_mvm_dump_fifos(mvm, &dump_data);
if (mvm->fw_dump_desc) {
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
mvm->fw_dump_desc->len);
dump_trig = (void *)dump_data->data;
memcpy(dump_trig, &mvm->fw_dump_desc->trig_desc,
sizeof(*dump_trig) + mvm->fw_dump_desc->len);
/* now we can free this copy */
iwl_mvm_free_fw_dump_desc(mvm);
dump_data = iwl_fw_error_next_data(dump_data);
}
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
dump_mem = (void *)dump_data->data;
@@ -1041,16 +1067,26 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0,
GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump);
clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status);
}
struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert = {
.trig_desc = {
.type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT),
},
};
static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
{
/* clear the D3 reconfig, we only need it to avoid dumping a
* firmware coredump on reconfiguration, we shouldn't do that
* on D3->D0 transition
*/
if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status))
if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) {
mvm->fw_dump_desc = &iwl_mvm_dump_desc_assert;
iwl_mvm_fw_error_dump(mvm);
}
/* cleanup all stale references (scan, roc), but keep the
* ucode_down ref until reconfig is complete
@@ -1091,6 +1127,10 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
mvm->vif_count = 0;
mvm->rx_ba_sessions = 0;
mvm->fw_dbg_conf = FW_DBG_INVALID;
/* keep statistics ticking */
iwl_mvm_accu_radio_stats(mvm);
}
int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
@@ -1213,6 +1253,11 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
{
lockdep_assert_held(&mvm->mutex);
/* firmware counters are obviously reset now, but we shouldn't
* partially track so also clear the fw_reset_accu counters.
*/
memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats));
/*
* Disallow low power states when the FW is down by taking
* the UCODE_DOWN ref. in case of ongoing hw restart the
@@ -1252,7 +1297,8 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
flush_work(&mvm->d0i3_exit_work);
flush_work(&mvm->async_handlers_wk);
flush_work(&mvm->fw_error_dump_wk);
cancel_delayed_work_sync(&mvm->fw_dump_wk);
iwl_mvm_free_fw_dump_desc(mvm);
mutex_lock(&mvm->mutex);
__iwl_mvm_mac_stop(mvm);
@@ -1317,6 +1363,11 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
/* make sure that beacon statistics don't go backwards with FW reset */
if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
mvmvif->beacon_stats.accu_num_beacons +=
mvmvif->beacon_stats.num_beacons;
/* Allocate resources for the MAC context, and add it to the fw */
ret = iwl_mvm_mac_ctxt_init(mvm, vif);
if (ret)
@@ -1810,6 +1861,11 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
if (changes & BSS_CHANGED_ASSOC) {
if (bss_conf->assoc) {
/* clear statistics to get clean beacon counter */
iwl_mvm_request_statistics(mvm, true);
memset(&mvmvif->beacon_stats, 0,
sizeof(mvmvif->beacon_stats));
/* add quota for this interface */
ret = iwl_mvm_update_quotas(mvm, NULL);
if (ret) {
@@ -2196,10 +2252,8 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
ret = iwl_mvm_scan_umac(mvm, vif, hw_req);
else if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
ret = iwl_mvm_unified_scan_lmac(mvm, vif, hw_req);
else
ret = iwl_mvm_scan_request(mvm, vif, req);
ret = iwl_mvm_unified_scan_lmac(mvm, vif, hw_req);
if (ret)
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
@@ -2527,13 +2581,7 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
/* Newest FW fixes sched scan while connected on another interface */
if (!vif->bss_conf.idle) {
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) {
if (!vif->bss_conf.idle) {
ret = -EBUSY;
goto out;
}
} else if (!iwl_mvm_is_idle(mvm)) {
ret = -EBUSY;
goto out;
}
@@ -3433,6 +3481,9 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
chsw->chandef.center_freq1);
iwl_fw_dbg_trigger_simple_stop(mvm, vif, FW_DBG_TRIGGER_CHANNEL_SWITCH,
NULL, 0);
switch (vif->type) {
case NL80211_IFTYPE_AP:
csa_vif =
@@ -3581,6 +3632,95 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
}
}
static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
memset(survey, 0, sizeof(*survey));
/* only support global statistics right now */
if (idx != 0)
return -ENOENT;
if (!(mvm->fw->ucode_capa.capa[0] &
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
return -ENOENT;
mutex_lock(&mvm->mutex);
if (mvm->ucode_loaded) {
ret = iwl_mvm_request_statistics(mvm, false);
if (ret)
goto out;
}
survey->filled = SURVEY_INFO_TIME |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_SCAN;
survey->time = mvm->accu_radio_stats.on_time_rf +
mvm->radio_stats.on_time_rf;
do_div(survey->time, USEC_PER_MSEC);
survey->time_rx = mvm->accu_radio_stats.rx_time +
mvm->radio_stats.rx_time;
do_div(survey->time_rx, USEC_PER_MSEC);
survey->time_tx = mvm->accu_radio_stats.tx_time +
mvm->radio_stats.tx_time;
do_div(survey->time_tx, USEC_PER_MSEC);
survey->time_scan = mvm->accu_radio_stats.on_time_scan +
mvm->radio_stats.on_time_scan;
do_div(survey->time_scan, USEC_PER_MSEC);
out:
mutex_unlock(&mvm->mutex);
return ret;
}
static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct station_info *sinfo)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
if (!(mvm->fw->ucode_capa.capa[0] &
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
return;
/* if beacon filtering isn't on mac80211 does it anyway */
if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
return;
if (!vif->bss_conf.assoc)
return;
mutex_lock(&mvm->mutex);
if (mvmvif->ap_sta_id != mvmsta->sta_id)
goto unlock;
if (iwl_mvm_request_statistics(mvm, false))
goto unlock;
sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons +
mvmvif->beacon_stats.accu_num_beacons;
sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX);
if (mvmvif->beacon_stats.avg_signal) {
/* firmware only reports a value after RXing a few beacons */
sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal;
sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
}
unlock:
mutex_unlock(&mvm->mutex);
}
const struct ieee80211_ops iwl_mvm_hw_ops = {
.tx = iwl_mvm_mac_tx,
.ampdu_action = iwl_mvm_mac_ampdu_action,
@@ -3647,4 +3787,6 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
#endif
.set_default_unicast_key = iwl_mvm_set_default_unicast_key,
#endif
.get_survey = iwl_mvm_mac_get_survey,
.sta_statistics = iwl_mvm_mac_sta_statistics,
};


@@ -75,6 +75,7 @@
#include "iwl-trans.h"
#include "iwl-notif-wait.h"
#include "iwl-eeprom-parse.h"
#include "iwl-fw-file.h"
#include "sta.h"
#include "fw-api.h"
#include "constants.h"
@@ -145,6 +146,19 @@ struct iwl_mvm_dump_ptrs {
u32 op_mode_len;
};
/**
* struct iwl_mvm_dump_desc - describes the dump
* @len: length of trig_desc->data
* @trig_desc: the description of the dump
*/
struct iwl_mvm_dump_desc {
size_t len;
/* must be last */
struct iwl_fw_error_dump_trigger_desc trig_desc;
};
extern struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert;
struct iwl_mvm_phy_ctxt {
u16 id;
u16 color;
@@ -337,6 +351,9 @@ struct iwl_mvm_vif_bf_data {
* @beacon_skb: the skb used to hold the AP/GO beacon template
* @smps_requests: the SMPS requests of differents parts of the driver,
* combined on update to yield the overall request to mac80211.
* @beacon_stats: beacon statistics, containing the # of received beacons,
* # of received beacons accumulated over FW restart, and the current
* average signal of beacons retrieved from the firmware
*/
struct iwl_mvm_vif {
u16 id;
@@ -354,6 +371,11 @@ struct iwl_mvm_vif {
bool ps_disabled;
struct iwl_mvm_vif_bf_data bf_data;
struct {
u32 num_beacons, accu_num_beacons;
u8 avg_signal;
} beacon_stats;
u32 ap_beacon_time;
enum iwl_tsf_id tsf_id;
@@ -593,6 +615,13 @@ struct iwl_mvm {
struct mvm_statistics_rx rx_stats;
struct {
u64 rx_time;
u64 tx_time;
u64 on_time_rf;
u64 on_time_scan;
} radio_stats, accu_radio_stats;
u8 queue_to_mac80211[IWL_MAX_HW_QUEUES];
atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
@@ -666,6 +695,7 @@ struct iwl_mvm {
struct iwl_mvm_frame_stats drv_rx_stats;
spinlock_t drv_stats_lock;
u16 dbgfs_rx_phyinfo;
#endif
struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX];
@@ -687,8 +717,9 @@ struct iwl_mvm {
/* -1 for always, 0 for never, >0 for that many times */
s8 restart_fw;
struct work_struct fw_error_dump_wk;
enum iwl_fw_dbg_conf fw_dbg_conf;
u8 fw_dbg_conf;
struct delayed_work fw_dump_wk;
struct iwl_mvm_dump_desc *fw_dump_desc;
#ifdef CONFIG_IWLWIFI_LEDS
struct led_classdev led;
@@ -824,6 +855,7 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_IN_D0I3,
IWL_MVM_STATUS_ROC_AUX_RUNNING,
IWL_MVM_STATUS_D3_RECONFIG,
IWL_MVM_STATUS_DUMPING_FW_LOG,
};
static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
@@ -883,6 +915,12 @@ static inline bool iwl_mvm_is_scd_cfg_supported(struct iwl_mvm *mvm)
return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SCD_CFG;
}
static inline bool iwl_mvm_bt_is_plcr_supported(struct iwl_mvm *mvm)
{
return (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BT_COEX_PLCR) &&
IWL_MVM_BT_COEX_CORUNNING;
}
extern const u8 iwl_mvm_ac_to_tx_fifo[];
struct iwl_rate_info {
@@ -951,12 +989,13 @@ static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
}
/* Statistics */
int iwl_mvm_rx_reply_statistics(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt);
int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear);
void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm);
/* NVM */
int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic);
@@ -1072,13 +1111,6 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
/* Scanning */
int iwl_mvm_scan_size(struct iwl_mvm *mvm);
int iwl_mvm_scan_request(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_scan_request *req);
int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan);
@@ -1089,14 +1121,8 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_sched_scan_request *req,
struct ieee80211_scan_ies *ies);
int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req);
int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req);
int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_sched_scan_request *req,
@@ -1238,7 +1264,6 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *info, u8 ac);
bool iwl_mvm_bt_coex_is_ant_avail_old(struct iwl_mvm *mvm, u8 ant);
bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm);
void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm);
int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm);
@@ -1352,9 +1377,6 @@ static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue,
iwl_mvm_enable_txq(mvm, queue, ssn, &cfg, wdg_timeout);
}
/* Assoc status */
bool iwl_mvm_is_idle(struct iwl_mvm *mvm);
/* Thermal management and CT-kill */
void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp);
@@ -1405,7 +1427,62 @@ struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);
void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, enum iwl_fw_dbg_conf id);
void iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm);
int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 id);
int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
const char *str, size_t len, unsigned int delay);
int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
struct iwl_mvm_dump_desc *desc,
unsigned int delay);
void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm);
int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_tlv *trigger,
const char *str, size_t len);
static inline bool
iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig,
struct ieee80211_vif *vif)
{
u32 trig_vif = le32_to_cpu(trig->vif_type);
return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || vif->type == trig_vif;
}
static inline bool
iwl_fw_dbg_trigger_stop_conf_match(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_tlv *trig)
{
return ((trig->mode & IWL_FW_DBG_TRIGGER_STOP) &&
(mvm->fw_dbg_conf == FW_DBG_INVALID ||
(BIT(mvm->fw_dbg_conf) & le32_to_cpu(trig->stop_conf_ids))));
}
static inline bool
iwl_fw_dbg_trigger_check_stop(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_fw_dbg_trigger_tlv *trig)
{
if (vif && !iwl_fw_dbg_trigger_vif_match(trig, vif))
return false;
return iwl_fw_dbg_trigger_stop_conf_match(mvm, trig);
}
static inline void
iwl_fw_dbg_trigger_simple_stop(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
enum iwl_fw_dbg_trigger trig,
const char *str, size_t len)
{
struct iwl_fw_dbg_trigger_tlv *trigger;
if (!iwl_fw_dbg_trigger_enabled(mvm->fw, trig))
return;
trigger = iwl_fw_dbg_get_trigger(mvm->fw, trig);
if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trigger))
return;
iwl_mvm_fw_dbg_collect_trig(mvm, trigger, str, len);
}
#endif /* __IWL_MVM_H__ */
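The inline helpers above give call sites a one-liner for "collect a dump when event X fires, unless the active debug configuration or vif type opted out". A hypothetical call site (the wrapper function and message text are made up for illustration; the trigger id and helper signature are the ones declared above):

/*
 * Hypothetical caller, for illustration only.
 * iwl_fw_dbg_trigger_simple_stop() quietly returns if the trigger TLV is
 * absent or the active fw_dbg_conf is not listed in the trigger's
 * stop_conf_ids.
 */
static void example_note_channel_switch(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif, int freq)
{
	char msg[32];

	scnprintf(msg, sizeof(msg), "CSA to %d MHz", freq);
	iwl_fw_dbg_trigger_simple_stop(mvm, vif, FW_DBG_TRIGGER_CHANNEL_SWITCH,
				       msg, strlen(msg) + 1);
}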

View file

@@ -237,8 +237,6 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false),
RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, true),
RX_HANDLER(SCAN_ITERATION_COMPLETE,
iwl_mvm_rx_scan_offload_iter_complete_notif, false),
RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
@@ -311,6 +309,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(REPLY_RX_MPDU_CMD),
CMD(BEACON_NOTIFICATION),
CMD(BEACON_TEMPLATE_CMD),
CMD(STATISTICS_CMD),
CMD(STATISTICS_NOTIFICATION),
CMD(EOSP_NOTIFICATION),
CMD(REDUCE_TX_POWER_CMD),
@@ -456,7 +455,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk);
INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
INIT_WORK(&mvm->fw_error_dump_wk, iwl_mvm_fw_error_dump_wk);
INIT_DELAYED_WORK(&mvm->fw_dump_wk, iwl_mvm_fw_error_dump_wk);
INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
spin_lock_init(&mvm->d0i3_tx_lock);
@@ -504,6 +503,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num;
memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
sizeof(trans->dbg_conf_tlv));
trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv;
/* set up notification wait support */
iwl_notification_wait_init(&mvm->notif_wait);
@@ -685,6 +685,38 @@ static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
mutex_unlock(&mvm->mutex);
}
static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_cmd *cmds_trig;
char buf[32];
int i;
if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF))
return;
trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF);
cmds_trig = (void *)trig->data;
if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
return;
for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) {
/* don't collect on CMD 0 */
if (!cmds_trig->cmds[i].cmd_id)
break;
if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd)
continue;
memset(buf, 0, sizeof(buf));
snprintf(buf, sizeof(buf), "CMD 0x%02x received", pkt->hdr.cmd);
iwl_mvm_fw_dbg_collect_trig(mvm, trig, buf, sizeof(buf));
break;
}
}
static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
@@ -693,6 +725,8 @@ static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
u8 i;
iwl_mvm_rx_check_trigger(mvm, pkt);
/*
* Do the notification wait before RX handlers so
* even if the RX handler consumes the RXB we have
@@ -827,7 +861,7 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk)
static void iwl_mvm_fw_error_dump_wk(struct work_struct *work)
{
struct iwl_mvm *mvm =
container_of(work, struct iwl_mvm, fw_error_dump_wk);
container_of(work, struct iwl_mvm, fw_dump_wk.work);
if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT))
return;
@@ -879,7 +913,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
* can't recover this since we're already half suspended.
*/
if (!mvm->restart_fw && fw_error) {
schedule_work(&mvm->fw_error_dump_wk);
iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert, 0);
} else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART,
&mvm->status)) {
struct iwl_mvm_reprobe *reprobe;


@@ -175,6 +175,8 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
cmd->rxchain_info |= cpu_to_le32(active_cnt <<
PHY_RX_CHAIN_MIMO_CNT_POS);
if (unlikely(mvm->dbgfs_rx_phyinfo))
cmd->rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo);
cmd->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
}


@@ -134,9 +134,12 @@ enum rs_column_mode {
#define MAX_NEXT_COLUMNS 7
#define MAX_COLUMN_CHECKS 3
struct rs_tx_column;
typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_scale_tbl_info *tbl);
struct iwl_scale_tbl_info *tbl,
const struct rs_tx_column *next_col);
struct rs_tx_column {
enum rs_column_mode mode;
@@ -147,14 +150,19 @@ struct rs_tx_column {
};
static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct iwl_scale_tbl_info *tbl)
struct iwl_scale_tbl_info *tbl,
const struct rs_tx_column *next_col)
{
return iwl_mvm_bt_coex_is_ant_avail(mvm, tbl->rate.ant);
return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant);
}
static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct iwl_scale_tbl_info *tbl)
struct iwl_scale_tbl_info *tbl,
const struct rs_tx_column *next_col)
{
struct iwl_mvm_sta *mvmsta;
struct iwl_mvm_vif *mvmvif;
if (!sta->ht_cap.ht_supported)
return false;
@@ -167,11 +175,17 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
return false;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
if (iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p)
return false;
return true;
}
static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct iwl_scale_tbl_info *tbl)
struct iwl_scale_tbl_info *tbl,
const struct rs_tx_column *next_col)
{
if (!sta->ht_cap.ht_supported)
return false;
@@ -180,7 +194,8 @@ static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
}
static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct iwl_scale_tbl_info *tbl)
struct iwl_scale_tbl_info *tbl,
const struct rs_tx_column *next_col)
{
struct rs_rate *rate = &tbl->rate;
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
@@ -800,6 +815,8 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
rate->ldpc = true;
if (ucode_rate & RATE_MCS_VHT_STBC_MSK)
rate->stbc = true;
if (ucode_rate & RATE_MCS_BF_MSK)
rate->bfer = true;
rate->bw = ucode_rate & RATE_MCS_CHAN_WIDTH_MSK;
@@ -809,7 +826,9 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
if (nss == 1) {
rate->type = LQ_HT_SISO;
WARN_ON_ONCE(!rate->stbc && num_of_ant != 1);
WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1,
"stbc %d bfer %d",
rate->stbc, rate->bfer);
} else if (nss == 2) {
rate->type = LQ_HT_MIMO2;
WARN_ON_ONCE(num_of_ant != 2);
@@ -822,7 +841,9 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
if (nss == 1) {
rate->type = LQ_VHT_SISO;
WARN_ON_ONCE(!rate->stbc && num_of_ant != 1);
WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1,
"stbc %d bfer %d",
rate->stbc, rate->bfer);
} else if (nss == 2) {
rate->type = LQ_VHT_MIMO2;
WARN_ON_ONCE(num_of_ant != 2);
@@ -1001,13 +1022,41 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
rs_get_lower_rate_in_column(lq_sta, rate);
}
/* Simple function to compare two rate scale table types */
static inline bool rs_rate_match(struct rs_rate *a,
struct rs_rate *b)
/* Check if both rates are identical
* allow_ant_mismatch enables matching a SISO rate on ANT_A or ANT_B
* with a rate indicating STBC/BFER and ANT_AB.
*/
static inline bool rs_rate_equal(struct rs_rate *a,
struct rs_rate *b,
bool allow_ant_mismatch)
{
bool ant_match = (a->ant == b->ant) && (a->stbc == b->stbc) &&
(a->bfer == b->bfer);
if (allow_ant_mismatch) {
if (a->stbc || a->bfer) {
WARN_ONCE(a->ant != ANT_AB, "stbc %d bfer %d ant %d",
a->stbc, a->bfer, a->ant);
ant_match |= (b->ant == ANT_A || b->ant == ANT_B);
} else if (b->stbc || b->bfer) {
WARN_ONCE(b->ant != ANT_AB, "stbc %d bfer %d ant %d",
b->stbc, b->bfer, b->ant);
ant_match |= (a->ant == ANT_A || a->ant == ANT_B);
}
}
return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi) &&
(a->ldpc == b->ldpc) && (a->index == b->index) && ant_match;
}
/* Check if both rates share the same column */
static inline bool rs_rate_column_match(struct rs_rate *a,
struct rs_rate *b)
{
bool ant_match;
if (a->stbc)
if (a->stbc || a->bfer)
ant_match = (b->ant == ANT_A || b->ant == ANT_B);
else
ant_match = (a->ant == b->ant);
@@ -1016,18 +1065,6 @@ static inline bool rs_rate_match(struct rs_rate *a,
&& ant_match;
}
static u32 rs_ch_width_from_mac_flags(enum mac80211_rate_control_flags flags)
{
if (flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
return RATE_MCS_CHAN_WIDTH_40;
else if (flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
return RATE_MCS_CHAN_WIDTH_80;
else if (flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
return RATE_MCS_CHAN_WIDTH_160;
return RATE_MCS_CHAN_WIDTH_20;
}
static u8 rs_get_tid(struct ieee80211_hdr *hdr)
{
u8 tid = IWL_MAX_TID_COUNT;
@@ -1048,15 +1085,17 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
{
int legacy_success;
int retries;
int mac_index, i;
int i;
struct iwl_lq_cmd *table;
enum mac80211_rate_control_flags mac_flags;
u32 ucode_rate;
struct rs_rate rate;
u32 lq_hwrate;
struct rs_rate lq_rate, tx_resp_rate;
struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0];
u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
bool allow_ant_mismatch = mvm->fw->ucode_capa.api[0] &
IWL_UCODE_TLV_API_LQ_SS_PARAMS;
/* Treat uninitialized rate scaling data same as non-existing. */
if (!lq_sta) {
@@ -1079,39 +1118,6 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
!(info->flags & IEEE80211_TX_STAT_AMPDU))
return;
/*
* Ignore this Tx frame response if its initial rate doesn't match
* that of latest Link Quality command. There may be stragglers
* from a previous Link Quality command, but we're no longer interested
* in those; they're either from the "active" mode while we're trying
* to check "search" mode, or a prior "search" mode after we've moved
* to a new "search" mode (which might become the new "active" mode).
*/
table = &lq_sta->lq;
ucode_rate = le32_to_cpu(table->rs_table[0]);
rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
if (info->band == IEEE80211_BAND_5GHZ)
rate.index -= IWL_FIRST_OFDM_RATE;
mac_flags = info->status.rates[0].flags;
mac_index = info->status.rates[0].idx;
/* For HT packets, map MCS to PLCP */
if (mac_flags & IEEE80211_TX_RC_MCS) {
/* Remove # of streams */
mac_index &= RATE_HT_MCS_RATE_CODE_MSK;
if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
mac_index++;
/*
* mac80211 HT index is always zero-indexed; we need to move
* HT OFDM rates after CCK rates in 2.4 GHz band
*/
if (info->band == IEEE80211_BAND_2GHZ)
mac_index += IWL_FIRST_OFDM_RATE;
} else if (mac_flags & IEEE80211_TX_RC_VHT_MCS) {
mac_index &= RATE_VHT_MCS_RATE_CODE_MSK;
if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
mac_index++;
}
if (time_after(jiffies, if (time_after(jiffies,
(unsigned long)(lq_sta->last_tx + (unsigned long)(lq_sta->last_tx +
(IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) { (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) {
@ -1126,21 +1132,24 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
} }
lq_sta->last_tx = jiffies; lq_sta->last_tx = jiffies;
/* Ignore this Tx frame response if its initial rate doesn't match
* that of latest Link Quality command. There may be stragglers
* from a previous Link Quality command, but we're no longer interested
* in those; they're either from the "active" mode while we're trying
* to check "search" mode, or a prior "search" mode after we've moved
* to a new "search" mode (which might become the new "active" mode).
*/
table = &lq_sta->lq;
lq_hwrate = le32_to_cpu(table->rs_table[0]);
rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate);
rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, &tx_resp_rate);
/* Here we actually compare this rate to the latest LQ command */ /* Here we actually compare this rate to the latest LQ command */
if ((mac_index < 0) || if (!rs_rate_equal(&tx_resp_rate, &lq_rate, allow_ant_mismatch)) {
(rate.sgi != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
(rate.bw != rs_ch_width_from_mac_flags(mac_flags)) ||
(rate.ant != info->status.antenna) ||
(!!(ucode_rate & RATE_MCS_HT_MSK) !=
!!(mac_flags & IEEE80211_TX_RC_MCS)) ||
(!!(ucode_rate & RATE_MCS_VHT_MSK) !=
!!(mac_flags & IEEE80211_TX_RC_VHT_MCS)) ||
(!!(ucode_rate & RATE_HT_MCS_GF_MSK) !=
!!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
(rate.index != mac_index)) {
IWL_DEBUG_RATE(mvm, IWL_DEBUG_RATE(mvm,
"initial rate %d does not match %d (0x%x)\n", "initial tx resp rate 0x%x does not match 0x%x\n",
mac_index, rate.index, ucode_rate); tx_resp_hwrate, lq_hwrate);
/* /*
* Since rates mis-match, the last LQ command may have failed. * Since rates mis-match, the last LQ command may have failed.
* After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
@ -1168,14 +1177,14 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
} }
if (WARN_ON_ONCE(!rs_rate_match(&rate, &curr_tbl->rate))) { if (WARN_ON_ONCE(!rs_rate_column_match(&lq_rate, &curr_tbl->rate))) {
IWL_DEBUG_RATE(mvm, IWL_DEBUG_RATE(mvm,
"Neither active nor search matches tx rate\n"); "Neither active nor search matches tx rate\n");
tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
rs_dump_rate(mvm, &tmp_tbl->rate, "ACTIVE"); rs_dump_rate(mvm, &tmp_tbl->rate, "ACTIVE");
tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
rs_dump_rate(mvm, &tmp_tbl->rate, "SEARCH"); rs_dump_rate(mvm, &tmp_tbl->rate, "SEARCH");
rs_dump_rate(mvm, &rate, "ACTUAL"); rs_dump_rate(mvm, &lq_rate, "ACTUAL");
/* /*
* no matching table found, let's by-pass the data collection * no matching table found, let's by-pass the data collection
@ -1200,9 +1209,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (info->status.ampdu_ack_len == 0) if (info->status.ampdu_ack_len == 0)
info->status.ampdu_len = 1; info->status.ampdu_len = 1;
ucode_rate = le32_to_cpu(table->rs_table[0]); rs_collect_tx_data(mvm, lq_sta, curr_tbl, lq_rate.index,
rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
rs_collect_tx_data(mvm, lq_sta, curr_tbl, rate.index,
info->status.ampdu_len, info->status.ampdu_len,
info->status.ampdu_ack_len, info->status.ampdu_ack_len,
reduced_txp); reduced_txp);
@ -1225,21 +1232,23 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK); legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
/* Collect data for each rate used during failed TX attempts */ /* Collect data for each rate used during failed TX attempts */
for (i = 0; i <= retries; ++i) { for (i = 0; i <= retries; ++i) {
ucode_rate = le32_to_cpu(table->rs_table[i]); lq_hwrate = le32_to_cpu(table->rs_table[i]);
rs_rate_from_ucode_rate(ucode_rate, info->band, &rate); rs_rate_from_ucode_rate(lq_hwrate, info->band,
&lq_rate);
/* /*
* Only collect stats if retried rate is in the same RS * Only collect stats if retried rate is in the same RS
* table as active/search. * table as active/search.
*/ */
if (rs_rate_match(&rate, &curr_tbl->rate)) if (rs_rate_column_match(&lq_rate, &curr_tbl->rate))
tmp_tbl = curr_tbl; tmp_tbl = curr_tbl;
else if (rs_rate_match(&rate, &other_tbl->rate)) else if (rs_rate_column_match(&lq_rate,
&other_tbl->rate))
tmp_tbl = other_tbl; tmp_tbl = other_tbl;
else else
continue; continue;
rs_collect_tx_data(mvm, lq_sta, tmp_tbl, rate.index, 1, rs_collect_tx_data(mvm, lq_sta, tmp_tbl, lq_rate.index,
i < retries ? 0 : legacy_success, 1, i < retries ? 0 : legacy_success,
reduced_txp); reduced_txp);
} }
@ -1250,7 +1259,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
} }
} }
/* The last TX rate is cached in lq_sta; it's set in if/else above */ /* The last TX rate is cached in lq_sta; it's set in if/else above */
lq_sta->last_rate_n_flags = ucode_rate; lq_sta->last_rate_n_flags = lq_hwrate;
IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp); IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
done: done:
/* See if there's a better rate or modulation mode to try. */ /* See if there's a better rate or modulation mode to try. */
@ -1590,7 +1599,7 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
for (j = 0; j < MAX_COLUMN_CHECKS; j++) { for (j = 0; j < MAX_COLUMN_CHECKS; j++) {
allow_func = next_col->checks[j]; allow_func = next_col->checks[j];
if (allow_func && !allow_func(mvm, sta, tbl)) if (allow_func && !allow_func(mvm, sta, tbl, next_col))
break; break;
} }
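Since the column-check callbacks now also receive the candidate column, the check table can be modelled as below; this is only a compile-clean sketch with placeholder types (struct ctx, struct col and siso_allowed are invented names), not the driver's rs column machinery:

#include <stdbool.h>
#include <stddef.h>

struct ctx;                      /* stands in for mvm/sta/tbl context */
struct col;                      /* stands in for the candidate column */

typedef bool (*allow_fn)(struct ctx *c, const struct col *next_col);

#define MAX_COLUMN_CHECKS 3

struct col_def {
	allow_fn checks[MAX_COLUMN_CHECKS];
};

static bool column_allowed(struct ctx *c, const struct col_def *def,
			   const struct col *next_col)
{
	for (size_t j = 0; j < MAX_COLUMN_CHECKS; j++) {
		allow_fn f = def->checks[j];

		/* One failing check vetoes the whole column, as in the
		 * loop above. */
		if (f && !f(c, next_col))
			return false;
	}
	return true;
}

static bool siso_allowed(struct ctx *c, const struct col *next_col)
{
	(void)c;
	(void)next_col;
	return true;
}

int main(void)
{
	struct col_def def = { .checks = { siso_allowed } };

	return column_allowed(NULL, &def, NULL) ? 0 : 1;
}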
@ -2536,6 +2545,7 @@ static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
#ifdef CONFIG_MAC80211_DEBUGFS #ifdef CONFIG_MAC80211_DEBUGFS
lq_sta->pers.dbg_fixed_rate = 0; lq_sta->pers.dbg_fixed_rate = 0;
lq_sta->pers.dbg_fixed_txp_reduction = TPC_INVALID; lq_sta->pers.dbg_fixed_txp_reduction = TPC_INVALID;
lq_sta->pers.ss_force = RS_SS_FORCE_NONE;
#endif #endif
lq_sta->pers.chains = 0; lq_sta->pers.chains = 0;
memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal)); memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
@ -3058,19 +3068,21 @@ static void rs_set_lq_ss_params(struct iwl_mvm *mvm,
if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
goto out; goto out;
#ifdef CONFIG_MAC80211_DEBUGFS
/* Check if forcing the decision is configured. /* Check if forcing the decision is configured.
* Note that SISO is forced by not allowing STBC or BFER * Note that SISO is forced by not allowing STBC or BFER
*/ */
if (lq_sta->ss_force == RS_SS_FORCE_STBC) if (lq_sta->pers.ss_force == RS_SS_FORCE_STBC)
ss_params |= (LQ_SS_STBC_1SS_ALLOWED | LQ_SS_FORCE); ss_params |= (LQ_SS_STBC_1SS_ALLOWED | LQ_SS_FORCE);
else if (lq_sta->ss_force == RS_SS_FORCE_BFER) else if (lq_sta->pers.ss_force == RS_SS_FORCE_BFER)
ss_params |= (LQ_SS_BFER_ALLOWED | LQ_SS_FORCE); ss_params |= (LQ_SS_BFER_ALLOWED | LQ_SS_FORCE);
if (lq_sta->ss_force != RS_SS_FORCE_NONE) { if (lq_sta->pers.ss_force != RS_SS_FORCE_NONE) {
IWL_DEBUG_RATE(mvm, "Forcing single stream Tx decision %d\n", IWL_DEBUG_RATE(mvm, "Forcing single stream Tx decision %d\n",
lq_sta->ss_force); lq_sta->pers.ss_force);
goto out; goto out;
} }
#endif
if (lq_sta->stbc_capable) if (lq_sta->stbc_capable)
ss_params |= LQ_SS_STBC_1SS_ALLOWED; ss_params |= LQ_SS_STBC_1SS_ALLOWED;
@ -3311,6 +3323,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
struct iwl_mvm *mvm; struct iwl_mvm *mvm;
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
struct rs_rate *rate = &tbl->rate; struct rs_rate *rate = &tbl->rate;
u32 ss_params;
mvm = lq_sta->pers.drv; mvm = lq_sta->pers.drv;
buff = kmalloc(2048, GFP_KERNEL); buff = kmalloc(2048, GFP_KERNEL);
if (!buff) if (!buff)
@ -3357,6 +3370,16 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
lq_sta->lq.agg_frame_cnt_limit); lq_sta->lq.agg_frame_cnt_limit);
desc += sprintf(buff+desc, "reduced tpc=%d\n", lq_sta->lq.reduced_tpc); desc += sprintf(buff+desc, "reduced tpc=%d\n", lq_sta->lq.reduced_tpc);
ss_params = le32_to_cpu(lq_sta->lq.ss_params);
desc += sprintf(buff+desc, "single stream params: %s%s%s%s\n",
(ss_params & LQ_SS_PARAMS_VALID) ?
"VALID," : "INVALID",
(ss_params & LQ_SS_BFER_ALLOWED) ?
"BFER," : "",
(ss_params & LQ_SS_STBC_1SS_ALLOWED) ?
"STBC," : "",
(ss_params & LQ_SS_FORCE) ?
"FORCE" : "");
desc += sprintf(buff+desc, desc += sprintf(buff+desc,
"Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n", "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
lq_sta->lq.initial_rate_index[0], lq_sta->lq.initial_rate_index[0],
@ -3533,7 +3556,7 @@ static ssize_t iwl_dbgfs_ss_force_read(struct file *file,
}; };
pos += scnprintf(buf+pos, bufsz-pos, "%s\n", pos += scnprintf(buf+pos, bufsz-pos, "%s\n",
ss_force_name[lq_sta->ss_force]); ss_force_name[lq_sta->pers.ss_force]);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos); return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
} }
@ -3544,12 +3567,12 @@ static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
int ret = 0; int ret = 0;
if (!strncmp("none", buf, 4)) { if (!strncmp("none", buf, 4)) {
lq_sta->ss_force = RS_SS_FORCE_NONE; lq_sta->pers.ss_force = RS_SS_FORCE_NONE;
} else if (!strncmp("siso", buf, 4)) { } else if (!strncmp("siso", buf, 4)) {
lq_sta->ss_force = RS_SS_FORCE_SISO; lq_sta->pers.ss_force = RS_SS_FORCE_SISO;
} else if (!strncmp("stbc", buf, 4)) { } else if (!strncmp("stbc", buf, 4)) {
if (lq_sta->stbc_capable) { if (lq_sta->stbc_capable) {
lq_sta->ss_force = RS_SS_FORCE_STBC; lq_sta->pers.ss_force = RS_SS_FORCE_STBC;
} else { } else {
IWL_ERR(mvm, IWL_ERR(mvm,
"can't force STBC. peer doesn't support\n"); "can't force STBC. peer doesn't support\n");
@ -3557,7 +3580,7 @@ static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
} }
} else if (!strncmp("bfer", buf, 4)) { } else if (!strncmp("bfer", buf, 4)) {
if (lq_sta->bfer_capable) { if (lq_sta->bfer_capable) {
lq_sta->ss_force = RS_SS_FORCE_BFER; lq_sta->pers.ss_force = RS_SS_FORCE_BFER;
} else { } else {
IWL_ERR(mvm, IWL_ERR(mvm,
"can't force BFER. peer doesn't support\n"); "can't force BFER. peer doesn't support\n");

View file

@ -170,6 +170,7 @@ struct rs_rate {
bool sgi; bool sgi;
bool ldpc; bool ldpc;
bool stbc; bool stbc;
bool bfer;
}; };
@ -331,14 +332,14 @@ struct iwl_lq_sta {
/* tx power reduce for this sta */ /* tx power reduce for this sta */
int tpc_reduce; int tpc_reduce;
/* force STBC/BFER/SISO for testing */
enum rs_ss_force_opt ss_force;
/* persistent fields - initialized only once - keep last! */ /* persistent fields - initialized only once - keep last! */
struct lq_sta_pers { struct lq_sta_pers {
#ifdef CONFIG_MAC80211_DEBUGFS #ifdef CONFIG_MAC80211_DEBUGFS
u32 dbg_fixed_rate; u32 dbg_fixed_rate;
u8 dbg_fixed_txp_reduction; u8 dbg_fixed_txp_reduction;
/* force STBC/BFER/SISO for testing */
enum rs_ss_force_opt ss_force;
#endif #endif
u8 chains; u8 chains;
s8 chain_signal[IEEE80211_MAX_CHAINS]; s8 chain_signal[IEEE80211_MAX_CHAINS];

View file

@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY * GPL LICENSE SUMMARY
* *
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as * it under the terms of version 2 of the GNU General Public License as
@ -32,7 +32,7 @@
* BSD LICENSE * BSD LICENSE
* *
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
@ -345,6 +345,25 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_mvm_sta *mvmsta; struct iwl_mvm_sta *mvmsta;
mvmsta = iwl_mvm_sta_from_mac80211(sta); mvmsta = iwl_mvm_sta_from_mac80211(sta);
rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status); rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);
if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
ieee80211_is_beacon(hdr->frame_control)) {
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
bool trig_check;
s32 rssi;
trig = iwl_fw_dbg_get_trigger(mvm->fw,
FW_DBG_TRIGGER_RSSI);
rssi_trig = (void *)trig->data;
rssi = le32_to_cpu(rssi_trig->rssi);
trig_check =
iwl_fw_dbg_trigger_check_stop(mvm, mvmsta->vif,
trig);
if (trig_check && rx_status->signal < rssi)
iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL, 0);
}
} }
rcu_read_unlock(); rcu_read_unlock();
@ -416,35 +435,43 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
} }
static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm, static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm,
struct iwl_notif_statistics *stats) struct mvm_statistics_rx *rx_stats)
{ {
/*
* NOTE FW aggregates the statistics - BUT the statistics are cleared
* when the driver issues REPLY_STATISTICS_CMD 0x9c with CLEAR_STATS
* bit set.
*/
lockdep_assert_held(&mvm->mutex); lockdep_assert_held(&mvm->mutex);
memcpy(&mvm->rx_stats, &stats->rx, sizeof(struct mvm_statistics_rx));
mvm->rx_stats = *rx_stats;
} }
struct iwl_mvm_stat_data { struct iwl_mvm_stat_data {
struct iwl_notif_statistics *stats;
struct iwl_mvm *mvm; struct iwl_mvm *mvm;
__le32 mac_id;
__s8 beacon_filter_average_energy;
struct mvm_statistics_general_v8 *general;
}; };
static void iwl_mvm_stat_iterator(void *_data, u8 *mac, static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif) struct ieee80211_vif *vif)
{ {
struct iwl_mvm_stat_data *data = _data; struct iwl_mvm_stat_data *data = _data;
struct iwl_notif_statistics *stats = data->stats;
struct iwl_mvm *mvm = data->mvm; struct iwl_mvm *mvm = data->mvm;
int sig = -stats->general.beacon_filter_average_energy; int sig = -data->beacon_filter_average_energy;
int last_event; int last_event;
int thold = vif->bss_conf.cqm_rssi_thold; int thold = vif->bss_conf.cqm_rssi_thold;
int hyst = vif->bss_conf.cqm_rssi_hyst; int hyst = vif->bss_conf.cqm_rssi_hyst;
u16 id = le32_to_cpu(stats->rx.general.mac_id); u16 id = le32_to_cpu(data->mac_id);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
/* This doesn't need the MAC ID check since it's not taking the
* data copied into the "data" struct, but rather the data from
* the notification directly.
*/
if (data->general) {
mvmvif->beacon_stats.num_beacons =
le32_to_cpu(data->general->beacon_counter[mvmvif->id]);
mvmvif->beacon_stats.avg_signal =
-data->general->beacon_average_energy[mvmvif->id];
}
if (mvmvif->id != id) if (mvmvif->id != id)
return; return;
@ -500,34 +527,101 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
} }
} }
/* static inline void
* iwl_mvm_rx_statistics - STATISTICS_NOTIFICATION handler iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
*
* TODO: This handler is implemented partially.
*/
int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{ {
struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_notif_statistics *stats = (void *)&pkt->data; struct iwl_fw_dbg_trigger_stats *trig_stats;
u32 trig_offset, trig_thold;
if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_STATS))
return;
trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_STATS);
trig_stats = (void *)trig->data;
if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
return;
trig_offset = le32_to_cpu(trig_stats->stop_offset);
trig_thold = le32_to_cpu(trig_stats->stop_threshold);
if (WARN_ON_ONCE(trig_offset >= iwl_rx_packet_payload_len(pkt)))
return;
if (le32_to_cpup((__le32 *) (pkt->data + trig_offset)) < trig_thold)
return;
iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL, 0);
}
void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
size_t v8_len = sizeof(struct iwl_notif_statistics_v8);
size_t v10_len = sizeof(struct iwl_notif_statistics_v10);
struct iwl_mvm_stat_data data = { struct iwl_mvm_stat_data data = {
.stats = stats,
.mvm = mvm, .mvm = mvm,
}; };
u32 temperature;
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_STATS_V10) {
struct iwl_notif_statistics_v10 *stats = (void *)&pkt->data;
if (iwl_rx_packet_payload_len(pkt) != v10_len)
goto invalid;
temperature = le32_to_cpu(stats->general.radio_temperature);
data.mac_id = stats->rx.general.mac_id;
data.beacon_filter_average_energy =
stats->general.beacon_filter_average_energy;
iwl_mvm_update_rx_statistics(mvm, &stats->rx);
mvm->radio_stats.rx_time = le64_to_cpu(stats->general.rx_time);
mvm->radio_stats.tx_time = le64_to_cpu(stats->general.tx_time);
mvm->radio_stats.on_time_rf =
le64_to_cpu(stats->general.on_time_rf);
mvm->radio_stats.on_time_scan =
le64_to_cpu(stats->general.on_time_scan);
data.general = &stats->general;
} else {
struct iwl_notif_statistics_v8 *stats = (void *)&pkt->data;
if (iwl_rx_packet_payload_len(pkt) != v8_len)
goto invalid;
temperature = le32_to_cpu(stats->general.radio_temperature);
data.mac_id = stats->rx.general.mac_id;
data.beacon_filter_average_energy =
stats->general.beacon_filter_average_energy;
iwl_mvm_update_rx_statistics(mvm, &stats->rx);
}
iwl_mvm_rx_stats_check_trigger(mvm, pkt);
/* Only handle rx statistics temperature changes if async temp /* Only handle rx statistics temperature changes if async temp
* notifications are not supported * notifications are not supported
*/ */
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_ASYNC_DTM)) if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_ASYNC_DTM))
iwl_mvm_tt_temp_changed(mvm, iwl_mvm_tt_temp_changed(mvm, temperature);
le32_to_cpu(stats->general.radio_temperature));
iwl_mvm_update_rx_statistics(mvm, stats);
ieee80211_iterate_active_interfaces(mvm->hw, ieee80211_iterate_active_interfaces(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_stat_iterator, iwl_mvm_stat_iterator,
&data); &data);
return;
invalid:
IWL_ERR(mvm, "received invalid statistics size (%d)!\n",
iwl_rx_packet_payload_len(pkt));
}
int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb));
return 0; return 0;
} }
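The version handling above follows one pattern: pick the struct layout from the firmware's advertised API bit and accept the payload only if its length matches that layout exactly. A stand-alone sketch of the same dispatch, with invented placeholder layouts instead of iwl_notif_statistics_v8/v10:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Invented layouts, only to demonstrate the exact-size check. */
struct stats_v8  { uint32_t temperature; uint32_t rx[40]; };
struct stats_v10 { uint32_t temperature; uint32_t rx[40]; uint64_t rx_time, tx_time; };

static int handle_stats(const void *payload, size_t len, int fw_has_v10_api)
{
	if (fw_has_v10_api) {
		struct stats_v10 s;

		if (len != sizeof(s))
			return -1;      /* "received invalid statistics size" */
		memcpy(&s, payload, sizeof(s));
		printf("v10: temp=%u rx_time=%llu\n", (unsigned)s.temperature,
		       (unsigned long long)s.rx_time);
	} else {
		struct stats_v8 s;

		if (len != sizeof(s))
			return -1;
		memcpy(&s, payload, sizeof(s));
		printf("v8: temp=%u\n", (unsigned)s.temperature);
	}
	return 0;
}

int main(void)
{
	struct stats_v10 s = { .temperature = 35, .rx_time = 1234 };

	return handle_stats(&s, sizeof(s), 1);
}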

View file

@ -82,6 +82,7 @@ struct iwl_mvm_scan_params {
struct _dwell { struct _dwell {
u16 passive; u16 passive;
u16 active; u16 active;
u16 fragmented;
} dwell[IEEE80211_NUM_BANDS]; } dwell[IEEE80211_NUM_BANDS];
}; };
@ -191,101 +192,6 @@ static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10; return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
} }
static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
struct cfg80211_scan_request *req,
bool basic_ssid,
struct iwl_mvm_scan_params *params)
{
struct iwl_scan_channel *chan = (struct iwl_scan_channel *)
(cmd->data + le16_to_cpu(cmd->tx_cmd.len));
int i;
int type = BIT(req->n_ssids) - 1;
enum ieee80211_band band = req->channels[0]->band;
if (!basic_ssid)
type |= BIT(req->n_ssids);
for (i = 0; i < cmd->channel_count; i++) {
chan->channel = cpu_to_le16(req->channels[i]->hw_value);
chan->type = cpu_to_le32(type);
if (req->channels[i]->flags & IEEE80211_CHAN_NO_IR)
chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE);
chan->active_dwell = cpu_to_le16(params->dwell[band].active);
chan->passive_dwell = cpu_to_le16(params->dwell[band].passive);
chan->iteration_count = cpu_to_le16(1);
chan++;
}
}
/*
* Fill in probe request with the following parameters:
* TA is our vif HW address, which mac80211 ensures we have.
* Packet is broadcasted, so this is both SA and DA.
* The probe request IE is made out of two: first comes the most prioritized
* SSID if a directed scan is requested. Second comes whatever extra
* information was given to us as the scan request IE.
*/
static u16 iwl_mvm_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
int n_ssids, const u8 *ssid, int ssid_len,
const u8 *band_ie, int band_ie_len,
const u8 *common_ie, int common_ie_len,
int left)
{
int len = 0;
u8 *pos = NULL;
/* Make sure there is enough space for the probe request,
* two mandatory IEs and the data */
left -= 24;
if (left < 0)
return 0;
frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
eth_broadcast_addr(frame->da);
memcpy(frame->sa, ta, ETH_ALEN);
eth_broadcast_addr(frame->bssid);
frame->seq_ctrl = 0;
len += 24;
/* for passive scans, no need to fill anything */
if (n_ssids == 0)
return (u16)len;
/* points to the payload of the request */
pos = &frame->u.probe_req.variable[0];
/* fill in our SSID IE */
left -= ssid_len + 2;
if (left < 0)
return 0;
*pos++ = WLAN_EID_SSID;
*pos++ = ssid_len;
if (ssid && ssid_len) { /* ssid_len may be == 0 even if ssid is valid */
memcpy(pos, ssid, ssid_len);
pos += ssid_len;
}
len += ssid_len + 2;
if (WARN_ON(left < band_ie_len + common_ie_len))
return len;
if (band_ie && band_ie_len) {
memcpy(pos, band_ie, band_ie_len);
pos += band_ie_len;
len += band_ie_len;
}
if (common_ie && common_ie_len) {
memcpy(pos, common_ie, common_ie_len);
pos += common_ie_len;
len += common_ie_len;
}
return (u16)len;
}
static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac, static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
struct ieee80211_vif *vif) struct ieee80211_vif *vif)
{ {
@ -325,7 +231,7 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
* If there is more than one active interface make * If there is more than one active interface make
* passive scan more fragmented. * passive scan more fragmented.
*/ */
frag_passive_dwell = (global_cnt < 2) ? 40 : 20; frag_passive_dwell = 40;
params->max_out_time = frag_passive_dwell; params->max_out_time = frag_passive_dwell;
} else { } else {
params->suspend_time = 120; params->suspend_time = 120;
@ -358,10 +264,10 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) { for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
if (params->passive_fragmented) if (params->passive_fragmented)
params->dwell[band].passive = frag_passive_dwell; params->dwell[band].fragmented = frag_passive_dwell;
else
params->dwell[band].passive = params->dwell[band].passive = iwl_mvm_get_passive_dwell(mvm,
iwl_mvm_get_passive_dwell(mvm, band); band);
params->dwell[band].active = iwl_mvm_get_active_dwell(mvm, band, params->dwell[band].active = iwl_mvm_get_active_dwell(mvm, band,
n_ssids); n_ssids);
} }
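With the hunk above, the per-band passive dwell is always filled from the default helper and the fragmented dwell lives in its own field instead of overwriting it. A tiny sketch of that bookkeeping, with made-up active-dwell and band values:

#include <stdbool.h>
#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ, NUM_BANDS };

struct dwell { unsigned passive, active, fragmented; };

static unsigned passive_dwell(int b)
{
	/* Mirrors the 100 + 20 / 100 + 10 defaults shown earlier. */
	return b == BAND_2GHZ ? 100 + 20 : 100 + 10;
}

int main(void)
{
	struct dwell dwell[NUM_BANDS] = { { 0 } };
	bool passive_fragmented = true;
	unsigned frag_passive_dwell = 40;

	for (int b = BAND_2GHZ; b < NUM_BANDS; b++) {
		if (passive_fragmented)
			dwell[b].fragmented = frag_passive_dwell;
		dwell[b].passive = passive_dwell(b);
		dwell[b].active = 10;   /* placeholder value */
		printf("band %d: passive=%u active=%u fragmented=%u\n",
		       b, dwell[b].passive, dwell[b].active,
		       dwell[b].fragmented);
	}
	return 0;
}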
@ -379,20 +285,11 @@ static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm,
{ {
int max_probe_len; int max_probe_len;
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE;
max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE;
else
max_probe_len = mvm->fw->ucode_capa.max_probe_length;
/* we create the 802.11 header and SSID element */ /* we create the 802.11 header and SSID element */
max_probe_len -= 24 + 2; max_probe_len -= 24 + 2;
/* basic ssid is added only for hw_scan with and old api */
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID) &&
!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) &&
!is_sched_scan)
max_probe_len -= 32;
/* DS parameter set element is added on 2.4GHZ band if required */ /* DS parameter set element is added on 2.4GHZ band if required */
if (iwl_mvm_rrm_scan_needed(mvm)) if (iwl_mvm_rrm_scan_needed(mvm))
max_probe_len -= 3; max_probe_len -= 3;
@ -404,9 +301,6 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan)
{ {
int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm, is_sched_scan); int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm, is_sched_scan);
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN))
return max_ie_len;
/* TODO: [BUG] This function should return the maximum allowed size of /* TODO: [BUG] This function should return the maximum allowed size of
* scan IEs, however the LMAC scan api contains both 2GHZ and 5GHZ IEs * scan IEs, however the LMAC scan api contains both 2GHZ and 5GHZ IEs
* in the same command. So the correct implementation of this function * in the same command. So the correct implementation of this function
@ -420,129 +314,6 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan)
return max_ie_len; return max_ie_len;
} }
int iwl_mvm_scan_request(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_scan_request *req)
{
struct iwl_host_cmd hcmd = {
.id = SCAN_REQUEST_CMD,
.len = { 0, },
.data = { mvm->scan_cmd, },
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
};
struct iwl_scan_cmd *cmd = mvm->scan_cmd;
int ret;
u32 status;
int ssid_len = 0;
u8 *ssid = NULL;
bool basic_ssid = !(mvm->fw->ucode_capa.flags &
IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID);
struct iwl_mvm_scan_params params = {};
lockdep_assert_held(&mvm->mutex);
/* we should have failed registration if scan_cmd was NULL */
if (WARN_ON(mvm->scan_cmd == NULL))
return -ENOMEM;
IWL_DEBUG_SCAN(mvm, "Handling mac80211 scan request\n");
mvm->scan_status = IWL_MVM_SCAN_OS;
memset(cmd, 0, ksize(cmd));
cmd->channel_count = (u8)req->n_channels;
cmd->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
cmd->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
cmd->rxchain_sel_flags = iwl_mvm_scan_rx_chain(mvm);
iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, req->flags, &params);
cmd->max_out_time = cpu_to_le32(params.max_out_time);
cmd->suspend_time = cpu_to_le32(params.suspend_time);
if (params.passive_fragmented)
cmd->scan_flags |= SCAN_FLAGS_FRAGMENTED_SCAN;
cmd->rxon_flags = iwl_mvm_scan_rxon_flags(req->channels[0]->band);
cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
MAC_FILTER_IN_BEACON);
if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
cmd->type = cpu_to_le32(SCAN_TYPE_DISCOVERY_FORCED);
else
cmd->type = cpu_to_le32(SCAN_TYPE_FORCED);
cmd->repeats = cpu_to_le32(1);
/*
* If the user asked for passive scan, don't change to active scan if
* you see any activity on the channel - remain passive.
*/
if (req->n_ssids > 0) {
cmd->passive2active = cpu_to_le16(1);
cmd->scan_flags |= SCAN_FLAGS_PASSIVE2ACTIVE;
if (basic_ssid) {
ssid = req->ssids[0].ssid;
ssid_len = req->ssids[0].ssid_len;
}
} else {
cmd->passive2active = 0;
cmd->scan_flags &= ~SCAN_FLAGS_PASSIVE2ACTIVE;
}
iwl_mvm_scan_fill_ssids(cmd->direct_scan, req->ssids, req->n_ssids,
basic_ssid ? 1 : 0);
cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
3 << TX_CMD_FLG_BT_PRIO_POS);
cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id;
cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
cmd->tx_cmd.rate_n_flags =
iwl_mvm_scan_rate_n_flags(mvm, req->channels[0]->band,
req->no_cck);
cmd->tx_cmd.len =
cpu_to_le16(iwl_mvm_fill_probe_req(
(struct ieee80211_mgmt *)cmd->data,
vif->addr,
req->n_ssids, ssid, ssid_len,
req->ie, req->ie_len, NULL, 0,
mvm->fw->ucode_capa.max_probe_length));
iwl_mvm_scan_fill_channels(cmd, req, basic_ssid, &params);
cmd->len = cpu_to_le16(sizeof(struct iwl_scan_cmd) +
le16_to_cpu(cmd->tx_cmd.len) +
(cmd->channel_count * sizeof(struct iwl_scan_channel)));
hcmd.len[0] = le16_to_cpu(cmd->len);
status = SCAN_RESPONSE_OK;
ret = iwl_mvm_send_cmd_status(mvm, &hcmd, &status);
if (!ret && status == SCAN_RESPONSE_OK) {
IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
} else {
/*
* If the scan failed, it usually means that the FW was unable
* to allocate the time events. Warn on it, but maybe we
* should try to send the command again with different params.
*/
IWL_ERR(mvm, "Scan failed! status 0x%x ret %d\n",
status, ret);
mvm->scan_status = IWL_MVM_SCAN_NONE;
ret = -EIO;
}
return ret;
}
int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_cmd_response *resp = (void *)pkt->data;
IWL_DEBUG_SCAN(mvm, "Scan response received. status 0x%x\n",
le32_to_cpu(resp->status));
return 0;
}
int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm, int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd) struct iwl_device_cmd *cmd)
@ -556,130 +327,25 @@ int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
return 0; return 0;
} }
int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_scan_complete_notif *notif = (void *)pkt->data;
lockdep_assert_held(&mvm->mutex);
IWL_DEBUG_SCAN(mvm, "Scan complete: status=0x%x scanned channels=%d\n",
notif->status, notif->scanned_channels);
if (mvm->scan_status == IWL_MVM_SCAN_OS)
mvm->scan_status = IWL_MVM_SCAN_NONE;
ieee80211_scan_completed(mvm->hw, notif->status != SCAN_COMP_STATUS_OK);
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
return 0;
}
int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm, int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd) struct iwl_device_cmd *cmd)
{ {
struct iwl_rx_packet *pkt = rxb_addr(rxb);
if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) &&
!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) {
struct iwl_sched_scan_results *notif = (void *)pkt->data;
if (!(notif->client_bitmap & SCAN_CLIENT_SCHED_SCAN))
return 0;
}
IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n"); IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
ieee80211_sched_scan_results(mvm->hw); ieee80211_sched_scan_results(mvm->hw);
return 0; return 0;
} }
static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt, void *data)
{
struct iwl_mvm *mvm =
container_of(notif_wait, struct iwl_mvm, notif_wait);
struct iwl_scan_complete_notif *notif;
u32 *resp;
switch (pkt->hdr.cmd) {
case SCAN_ABORT_CMD:
resp = (void *)pkt->data;
if (*resp == CAN_ABORT_STATUS) {
IWL_DEBUG_SCAN(mvm,
"Scan can be aborted, wait until completion\n");
return false;
}
/*
* If scan cannot be aborted, it means that we had a
* SCAN_COMPLETE_NOTIFICATION in the pipe and it called
* ieee80211_scan_completed already.
*/
IWL_DEBUG_SCAN(mvm, "Scan cannot be aborted, exit now: %d\n",
*resp);
return true;
case SCAN_COMPLETE_NOTIFICATION:
notif = (void *)pkt->data;
IWL_DEBUG_SCAN(mvm, "Scan aborted: status 0x%x\n",
notif->status);
return true;
default:
WARN_ON(1);
return false;
};
}
static int iwl_mvm_cancel_regular_scan(struct iwl_mvm *mvm)
{
struct iwl_notification_wait wait_scan_abort;
static const u8 scan_abort_notif[] = { SCAN_ABORT_CMD,
SCAN_COMPLETE_NOTIFICATION };
int ret;
iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort,
scan_abort_notif,
ARRAY_SIZE(scan_abort_notif),
iwl_mvm_scan_abort_notif, NULL);
ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, 0, 0, NULL);
if (ret) {
IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
/* mac80211's state will be cleaned in the nic_restart flow */
goto out_remove_notif;
}
return iwl_wait_notification(&mvm->notif_wait, &wait_scan_abort, HZ);
out_remove_notif:
iwl_remove_notification(&mvm->notif_wait, &wait_scan_abort);
return ret;
}
int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm, int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd) struct iwl_device_cmd *cmd)
{ {
struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rx_packet *pkt = rxb_addr(rxb);
u8 status, ebs_status; struct iwl_periodic_scan_complete *scan_notif;
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) { scan_notif = (void *)pkt->data;
struct iwl_periodic_scan_complete *scan_notif;
scan_notif = (void *)pkt->data;
status = scan_notif->status;
ebs_status = scan_notif->ebs_status;
} else {
struct iwl_scan_offload_complete *scan_notif;
scan_notif = (void *)pkt->data;
status = scan_notif->status;
ebs_status = scan_notif->ebs_status;
}
/* scan status must be locked for proper checking */ /* scan status must be locked for proper checking */
lockdep_assert_held(&mvm->mutex); lockdep_assert_held(&mvm->mutex);
@ -687,9 +353,9 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
"%s completed, status %s, EBS status %s\n", "%s completed, status %s, EBS status %s\n",
mvm->scan_status == IWL_MVM_SCAN_SCHED ? mvm->scan_status == IWL_MVM_SCAN_SCHED ?
"Scheduled scan" : "Scan", "Scheduled scan" : "Scan",
status == IWL_SCAN_OFFLOAD_COMPLETED ? scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
"completed" : "aborted", "completed" : "aborted",
ebs_status == IWL_SCAN_EBS_SUCCESS ? scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ?
"success" : "failed"); "success" : "failed");
@ -700,64 +366,16 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
} else if (mvm->scan_status == IWL_MVM_SCAN_OS) { } else if (mvm->scan_status == IWL_MVM_SCAN_OS) {
mvm->scan_status = IWL_MVM_SCAN_NONE; mvm->scan_status = IWL_MVM_SCAN_NONE;
ieee80211_scan_completed(mvm->hw, ieee80211_scan_completed(mvm->hw,
status == IWL_SCAN_OFFLOAD_ABORTED); scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
} }
if (ebs_status) if (scan_notif->ebs_status)
mvm->last_ebs_successful = false; mvm->last_ebs_successful = false;
return 0; return 0;
} }
static void iwl_scan_offload_build_tx_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_scan_ies *ies,
enum ieee80211_band band,
struct iwl_tx_cmd *cmd,
u8 *data)
{
u16 cmd_len;
cmd->tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL);
cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
cmd->sta_id = mvm->aux_sta.sta_id;
cmd->rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm, band, false);
cmd_len = iwl_mvm_fill_probe_req((struct ieee80211_mgmt *)data,
vif->addr,
1, NULL, 0,
ies->ies[band], ies->len[band],
ies->common_ies, ies->common_ie_len,
SCAN_OFFLOAD_PROBE_REQ_SIZE);
cmd->len = cpu_to_le16(cmd_len);
}
static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_sched_scan_request *req,
struct iwl_scan_offload_cmd *scan,
struct iwl_mvm_scan_params *params)
{
scan->channel_count = req->n_channels;
scan->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT;
scan->rx_chain = iwl_mvm_scan_rx_chain(mvm);
scan->max_out_time = cpu_to_le32(params->max_out_time);
scan->suspend_time = cpu_to_le32(params->suspend_time);
scan->filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
MAC_FILTER_IN_BEACON);
scan->scan_type = cpu_to_le32(SCAN_TYPE_BACKGROUND);
scan->rep_count = cpu_to_le32(1);
if (params->passive_fragmented)
scan->scan_flags |= SCAN_FLAGS_FRAGMENTED_SCAN;
}
static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list) static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
{ {
int i; int i;
@ -815,127 +433,6 @@ static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
} }
} }
static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req,
u8 *channels_buffer,
enum ieee80211_band band,
int *head,
u32 ssid_bitmap,
struct iwl_mvm_scan_params *params)
{
u32 n_channels = mvm->fw->ucode_capa.n_scan_channels;
__le32 *type = (__le32 *)channels_buffer;
__le16 *channel_number = (__le16 *)(type + n_channels);
__le16 *iter_count = channel_number + n_channels;
__le32 *iter_interval = (__le32 *)(iter_count + n_channels);
u8 *active_dwell = (u8 *)(iter_interval + n_channels);
u8 *passive_dwell = active_dwell + n_channels;
int i, index = 0;
for (i = 0; i < req->n_channels; i++) {
struct ieee80211_channel *chan = req->channels[i];
if (chan->band != band)
continue;
index = *head;
(*head)++;
channel_number[index] = cpu_to_le16(chan->hw_value);
active_dwell[index] = params->dwell[band].active;
passive_dwell[index] = params->dwell[band].passive;
iter_count[index] = cpu_to_le16(1);
iter_interval[index] = 0;
if (!(chan->flags & IEEE80211_CHAN_NO_IR))
type[index] |=
cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE);
type[index] |= cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL |
IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL);
if (chan->flags & IEEE80211_CHAN_NO_HT40)
type[index] |=
cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_NARROW);
/* scan for all SSIDs from req->ssids */
type[index] |= cpu_to_le32(ssid_bitmap);
}
}
int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_sched_scan_request *req,
struct ieee80211_scan_ies *ies)
{
int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
int head = 0;
u32 ssid_bitmap;
int cmd_len;
int ret;
u8 *probes;
bool basic_ssid = !(mvm->fw->ucode_capa.flags &
IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID);
struct iwl_scan_offload_cfg *scan_cfg;
struct iwl_host_cmd cmd = {
.id = SCAN_OFFLOAD_CONFIG_CMD,
};
struct iwl_mvm_scan_params params = {};
lockdep_assert_held(&mvm->mutex);
cmd_len = sizeof(struct iwl_scan_offload_cfg) +
mvm->fw->ucode_capa.n_scan_channels * IWL_SCAN_CHAN_SIZE +
2 * SCAN_OFFLOAD_PROBE_REQ_SIZE;
scan_cfg = kzalloc(cmd_len, GFP_KERNEL);
if (!scan_cfg)
return -ENOMEM;
probes = scan_cfg->data +
mvm->fw->ucode_capa.n_scan_channels * IWL_SCAN_CHAN_SIZE;
iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, 0, &params);
iwl_build_scan_cmd(mvm, vif, req, &scan_cfg->scan_cmd, &params);
scan_cfg->scan_cmd.len = cpu_to_le16(cmd_len);
iwl_scan_offload_build_ssid(req, scan_cfg->scan_cmd.direct_scan,
&ssid_bitmap, basic_ssid);
/* build tx frames for supported bands */
if (band_2ghz) {
iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
IEEE80211_BAND_2GHZ,
&scan_cfg->scan_cmd.tx_cmd[0],
probes);
iwl_build_channel_cfg(mvm, req, scan_cfg->data,
IEEE80211_BAND_2GHZ, &head,
ssid_bitmap, &params);
}
if (band_5ghz) {
iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
IEEE80211_BAND_5GHZ,
&scan_cfg->scan_cmd.tx_cmd[1],
probes +
SCAN_OFFLOAD_PROBE_REQ_SIZE);
iwl_build_channel_cfg(mvm, req, scan_cfg->data,
IEEE80211_BAND_5GHZ, &head,
ssid_bitmap, &params);
}
cmd.data[0] = scan_cfg;
cmd.len[0] = cmd_len;
cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
IWL_DEBUG_SCAN(mvm, "Sending scheduled scan config\n");
ret = iwl_mvm_send_cmd(mvm, &cmd);
kfree(scan_cfg);
return ret;
}
int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm, int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req) struct cfg80211_sched_scan_request *req)
{ {
@ -1018,33 +515,6 @@ static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
return true; return true;
} }
int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req)
{
struct iwl_scan_offload_req scan_req = {
.watchdog = IWL_SCHED_SCAN_WATCHDOG,
.schedule_line[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS,
.schedule_line[0].delay = cpu_to_le16(req->interval / 1000),
.schedule_line[0].full_scan_mul = 1,
.schedule_line[1].iterations = 0xff,
.schedule_line[1].delay = cpu_to_le16(req->interval / 1000),
.schedule_line[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER,
};
if (iwl_mvm_scan_pass_all(mvm, req))
scan_req.flags |= cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_PASS_ALL);
if (mvm->last_ebs_successful &&
mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT)
scan_req.flags |=
cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE);
return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, 0,
sizeof(scan_req), &scan_req);
}
int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm, int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, struct ieee80211_vif *vif,
struct cfg80211_sched_scan_request *req, struct cfg80211_sched_scan_request *req,
@ -1057,21 +527,12 @@ int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm,
if (ret) if (ret)
return ret; return ret;
ret = iwl_mvm_sched_scan_umac(mvm, vif, req, ies); ret = iwl_mvm_sched_scan_umac(mvm, vif, req, ies);
} else if ((mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) { } else {
mvm->scan_status = IWL_MVM_SCAN_SCHED; mvm->scan_status = IWL_MVM_SCAN_SCHED;
ret = iwl_mvm_config_sched_scan_profiles(mvm, req); ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
if (ret) if (ret)
return ret; return ret;
ret = iwl_mvm_unified_sched_scan_lmac(mvm, vif, req, ies); ret = iwl_mvm_unified_sched_scan_lmac(mvm, vif, req, ies);
} else {
mvm->scan_status = IWL_MVM_SCAN_SCHED;
ret = iwl_mvm_config_sched_scan(mvm, vif, req, ies);
if (ret)
return ret;
ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
if (ret)
return ret;
ret = iwl_mvm_sched_scan_start(mvm, req);
} }
return ret; return ret;
@ -1088,9 +549,7 @@ static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm)
/* Exit instantly with error when device is not ready /* Exit instantly with error when device is not ready
* to receive scan abort command or it does not perform * to receive scan abort command or it does not perform
* scheduled scan currently */ * scheduled scan currently */
if (mvm->scan_status != IWL_MVM_SCAN_SCHED && if (mvm->scan_status == IWL_MVM_SCAN_NONE)
(!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
mvm->scan_status != IWL_MVM_SCAN_OS))
return -EIO; return -EIO;
ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status); ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
@ -1131,13 +590,6 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
if (iwl_mvm_is_radio_killed(mvm)) if (iwl_mvm_is_radio_killed(mvm))
goto out; goto out;
if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
(!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
mvm->scan_status != IWL_MVM_SCAN_OS)) {
IWL_DEBUG_SCAN(mvm, "No scan to stop\n");
return 0;
}
iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done, iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
scan_done_notif, scan_done_notif,
ARRAY_SIZE(scan_done_notif), ARRAY_SIZE(scan_done_notif),
@ -1317,7 +769,7 @@ iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm,
cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive; cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
if (params->passive_fragmented) if (params->passive_fragmented)
cmd->fragmented_dwell = cmd->fragmented_dwell =
params->dwell[IEEE80211_BAND_2GHZ].passive; params->dwell[IEEE80211_BAND_2GHZ].fragmented;
cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm); cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm);
cmd->max_out_time = cpu_to_le32(params->max_out_time); cmd->max_out_time = cpu_to_le32(params->max_out_time);
cmd->suspend_time = cpu_to_le32(params->suspend_time); cmd->suspend_time = cpu_to_le32(params->suspend_time);
@ -1580,9 +1032,7 @@ int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
return 0; return 0;
} }
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) return iwl_mvm_scan_offload_stop(mvm, true);
return iwl_mvm_scan_offload_stop(mvm, true);
return iwl_mvm_cancel_regular_scan(mvm);
} }
/* UMAC scan API */ /* UMAC scan API */
@ -1765,7 +1215,7 @@ iwl_mvm_build_generic_umac_scan_cmd(struct iwl_mvm *mvm,
cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive; cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
if (params->passive_fragmented) if (params->passive_fragmented)
cmd->fragmented_dwell = cmd->fragmented_dwell =
params->dwell[IEEE80211_BAND_2GHZ].passive; params->dwell[IEEE80211_BAND_2GHZ].fragmented;
cmd->max_out_time = cpu_to_le32(params->max_out_time); cmd->max_out_time = cpu_to_le32(params->max_out_time);
cmd->suspend_time = cpu_to_le32(params->suspend_time); cmd->suspend_time = cpu_to_le32(params->suspend_time);
cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH); cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
@ -2159,14 +1609,8 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm)
mvm->fw->ucode_capa.n_scan_channels + mvm->fw->ucode_capa.n_scan_channels +
sizeof(struct iwl_scan_req_umac_tail); sizeof(struct iwl_scan_req_umac_tail);
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) return sizeof(struct iwl_scan_req_unified_lmac) +
return sizeof(struct iwl_scan_req_unified_lmac) + sizeof(struct iwl_scan_channel_cfg_lmac) *
sizeof(struct iwl_scan_channel_cfg_lmac) * mvm->fw->ucode_capa.n_scan_channels +
mvm->fw->ucode_capa.n_scan_channels + sizeof(struct iwl_scan_probe_req);
sizeof(struct iwl_scan_probe_req);
return sizeof(struct iwl_scan_cmd) +
mvm->fw->ucode_capa.max_probe_length +
mvm->fw->ucode_capa.n_scan_channels *
sizeof(struct iwl_scan_channel);
} }
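After dropping the legacy SCAN_REQUEST_CMD path, the scan buffer size is a single expression over the unified LMAC structures (or the UMAC ones, handled just above). A sketch of that sizing with placeholder sizes standing in for the firmware structs:

#include <stddef.h>
#include <stdio.h>

/* Placeholder sizes; the real code uses sizeof() on the firmware API
 * structs and the TLV-reported channel count. */
#define SIZEOF_REQ_LMAC      512
#define SIZEOF_CHANNEL_CFG    20
#define SIZEOF_PROBE_REQ     512

static size_t scan_size(unsigned n_scan_channels)
{
	return SIZEOF_REQ_LMAC +
	       (size_t)SIZEOF_CHANNEL_CFG * n_scan_channels +
	       SIZEOF_PROBE_REQ;
}

int main(void)
{
	printf("scan cmd size for 50 channels: %zu bytes\n", scan_size(50));
	return 0;
}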

View file

@ -664,6 +664,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
info->status.rates[0].count = tx_resp->failure_frame + 1; info->status.rates[0].count = tx_resp->failure_frame + 1;
iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate), iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
info); info);
info->status.status_driver_data[1] =
(void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate);
/* Single frame failure in an AMPDU queue => send BAR */ /* Single frame failure in an AMPDU queue => send BAR */
if (txq_id >= mvm->first_agg_queue && if (txq_id >= mvm->first_agg_queue &&
@ -909,6 +911,8 @@ static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
info->status.tx_time = tid_data->tx_time; info->status.tx_time = tid_data->tx_time;
info->status.status_driver_data[0] = info->status.status_driver_data[0] =
(void *)(uintptr_t)tid_data->reduced_tpc; (void *)(uintptr_t)tid_data->reduced_tpc;
info->status.status_driver_data[1] =
(void *)(uintptr_t)tid_data->rate_n_flags;
} }
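Both TX-response paths above stash the initial rate in status_driver_data[1] as a uintptr_t, which is how iwl_mvm_rs_tx_status() recovers it without re-deriving the rate from mac80211 flags. A small self-contained sketch of that handoff (struct tx_info and the values are placeholders):

#include <stdint.h>
#include <stdio.h>

struct tx_info {
	void *status_driver_data[2];
};

static void store_status(struct tx_info *info, uint32_t reduced_txp,
			 uint32_t rate_n_flags)
{
	info->status_driver_data[0] = (void *)(uintptr_t)reduced_txp;
	info->status_driver_data[1] = (void *)(uintptr_t)rate_n_flags;
}

static void consume_status(const struct tx_info *info)
{
	uint32_t reduced_txp = (uintptr_t)info->status_driver_data[0];
	uint32_t hwrate = (uintptr_t)info->status_driver_data[1];

	printf("reduced_txp=%u hwrate=0x%x\n",
	       (unsigned)reduced_txp, (unsigned)hwrate);
}

int main(void)
{
	struct tx_info info;

	store_status(&info, 0, 0x4103);
	consume_status(&info);
	return 0;
}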
int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,

View file

@ -332,7 +332,7 @@ static const char *desc_lookup(u32 num)
* read with u32-sized accesses, any members with a different size * read with u32-sized accesses, any members with a different size
* need to be ordered correctly though! * need to be ordered correctly though!
*/ */
struct iwl_error_event_table { struct iwl_error_event_table_v1 {
u32 valid; /* (nonzero) valid, (0) log is empty */ u32 valid; /* (nonzero) valid, (0) log is empty */
u32 error_id; /* type of error */ u32 error_id; /* type of error */
u32 pc; /* program counter */ u32 pc; /* program counter */
@ -377,7 +377,55 @@ struct iwl_error_event_table {
u32 u_timestamp; /* indicate when the date and time of the u32 u_timestamp; /* indicate when the date and time of the
* compilation */ * compilation */
u32 flow_handler; /* FH read/write pointers, RX credit */ u32 flow_handler; /* FH read/write pointers, RX credit */
} __packed; } __packed /* LOG_ERROR_TABLE_API_S_VER_1 */;
struct iwl_error_event_table {
u32 valid; /* (nonzero) valid, (0) log is empty */
u32 error_id; /* type of error */
u32 pc; /* program counter */
u32 blink1; /* branch link */
u32 blink2; /* branch link */
u32 ilink1; /* interrupt link */
u32 ilink2; /* interrupt link */
u32 data1; /* error-specific data */
u32 data2; /* error-specific data */
u32 data3; /* error-specific data */
u32 bcon_time; /* beacon timer */
u32 tsf_low; /* network timestamp function timer */
u32 tsf_hi; /* network timestamp function timer */
u32 gp1; /* GP1 timer register */
u32 gp2; /* GP2 timer register */
u32 gp3; /* GP3 timer register */
u32 major; /* uCode version major */
u32 minor; /* uCode version minor */
u32 hw_ver; /* HW Silicon version */
u32 brd_ver; /* HW board version */
u32 log_pc; /* log program counter */
u32 frame_ptr; /* frame pointer */
u32 stack_ptr; /* stack pointer */
u32 hcmd; /* last host command header */
u32 isr0; /* isr status register LMPM_NIC_ISR0:
* rxtx_flag */
u32 isr1; /* isr status register LMPM_NIC_ISR1:
* host_flag */
u32 isr2; /* isr status register LMPM_NIC_ISR2:
* enc_flag */
u32 isr3; /* isr status register LMPM_NIC_ISR3:
* time_flag */
u32 isr4; /* isr status register LMPM_NIC_ISR4:
* wico interrupt */
u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
u32 wait_event; /* wait event() caller address */
u32 l2p_control; /* L2pControlField */
u32 l2p_duration; /* L2pDurationField */
u32 l2p_mhvalid; /* L2pMhValidBits */
u32 l2p_addr_match; /* L2pAddrMatchStat */
u32 lmpm_pmg_sel; /* indicate which clocks are turned on
* (LMPM_PMG_SEL) */
u32 u_timestamp; /* indicate when the date and time of the
* compilation */
u32 flow_handler; /* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_2 */;
/* /*
* UMAC error struct - relevant starting from family 8000 chip. * UMAC error struct - relevant starting from family 8000 chip.
@ -396,11 +444,11 @@ struct iwl_umac_error_event_table {
u32 data1; /* error-specific data */ u32 data1; /* error-specific data */
u32 data2; /* error-specific data */ u32 data2; /* error-specific data */
u32 data3; /* error-specific data */ u32 data3; /* error-specific data */
u32 umac_fw_ver; /* UMAC version */ u32 umac_major;
u32 umac_fw_api_ver; /* UMAC FW API ver */ u32 umac_minor;
u32 frame_pointer; /* core register 27*/ u32 frame_pointer; /* core register 27*/
u32 stack_pointer; /* core register 28 */ u32 stack_pointer; /* core register 28 */
u32 cmd_header; /* latest host cmd sent to UMAC */ u32 cmd_header; /* latest host cmd sent to UMAC */
u32 nic_isr_pref; /* ISR status register */ u32 nic_isr_pref; /* ISR status register */
} __packed; } __packed;
@ -441,18 +489,18 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1); IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1);
IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2); IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2);
IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3); IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3);
IWL_ERR(mvm, "0x%08X | umac version\n", table.umac_fw_ver); IWL_ERR(mvm, "0x%08X | umac major\n", table.umac_major);
IWL_ERR(mvm, "0x%08X | umac api version\n", table.umac_fw_api_ver); IWL_ERR(mvm, "0x%08X | umac minor\n", table.umac_minor);
IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer); IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer);
IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer); IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer);
IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header); IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header);
IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref); IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
} }
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) static void iwl_mvm_dump_nic_error_log_old(struct iwl_mvm *mvm)
{ {
struct iwl_trans *trans = mvm->trans; struct iwl_trans *trans = mvm->trans;
struct iwl_error_event_table table; struct iwl_error_event_table_v1 table;
u32 base; u32 base;
base = mvm->error_event_table; base = mvm->error_event_table;
@ -489,7 +537,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
table.data1, table.data2, table.data3, table.data1, table.data2, table.data3,
table.blink1, table.blink2, table.ilink1, table.blink1, table.blink2, table.ilink1,
table.ilink2, table.bcon_time, table.gp1, table.ilink2, table.bcon_time, table.gp1,
table.gp2, table.gp3, table.ucode_ver, table.gp2, table.gp3, table.ucode_ver, 0,
table.hw_ver, table.brd_ver); table.hw_ver, table.brd_ver);
IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id, IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
desc_lookup(table.error_id)); desc_lookup(table.error_id));
@ -530,6 +578,92 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
iwl_mvm_dump_umac_error_log(mvm); iwl_mvm_dump_umac_error_log(mvm);
} }

+void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
+{
+	struct iwl_trans *trans = mvm->trans;
+	struct iwl_error_event_table table;
+	u32 base;
+
+	if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_NEW_VERSION)) {
+		iwl_mvm_dump_nic_error_log_old(mvm);
+		return;
+	}
+
+	base = mvm->error_event_table;
+	if (mvm->cur_ucode == IWL_UCODE_INIT) {
+		if (!base)
+			base = mvm->fw->init_errlog_ptr;
+	} else {
+		if (!base)
+			base = mvm->fw->inst_errlog_ptr;
+	}
+
+	if (base < 0x800000) {
+		IWL_ERR(mvm,
+			"Not valid error log pointer 0x%08X for %s uCode\n",
+			base,
+			(mvm->cur_ucode == IWL_UCODE_INIT)
+			? "Init" : "RT");
+		return;
+	}
+
+	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+
+	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
+		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
+			mvm->status, table.valid);
+	}
+
+	/* Do not change this output - scripts rely on it */
+	IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
+
+	trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
+				      table.data1, table.data2, table.data3,
+				      table.blink1, table.blink2, table.ilink1,
+				      table.ilink2, table.bcon_time, table.gp1,
+				      table.gp2, table.gp3, table.major,
+				      table.minor, table.hw_ver, table.brd_ver);
+	IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
+		desc_lookup(table.error_id));
+	IWL_ERR(mvm, "0x%08X | uPc\n", table.pc);
+	IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1);
+	IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
+	IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
+	IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
+	IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
+	IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
+	IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
+	IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
+	IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
+	IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
+	IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
+	IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
+	IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3);
+	IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major);
+	IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor);
+	IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
+	IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
+	IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
+	IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
+	IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
+	IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
+	IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
+	IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
+	IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref);
+	IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
+	IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
+	IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
+	IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
+	IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
+	IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
+	IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
+	IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
+
+	if (mvm->support_umac_log)
+		iwl_mvm_dump_umac_error_log(mvm);
+}

void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn,
			const struct iwl_trans_txq_scd_cfg *cfg,
			unsigned int wdg_timeout)
@@ -643,6 +777,40 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
	ieee80211_request_smps(vif, smps_mode);
}

+int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
+{
+	struct iwl_statistics_cmd scmd = {
+		.flags = clear ? cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0,
+	};
+	struct iwl_host_cmd cmd = {
+		.id = STATISTICS_CMD,
+		.len[0] = sizeof(scmd),
+		.data[0] = &scmd,
+		.flags = CMD_WANT_SKB,
+	};
+	int ret;
+
+	ret = iwl_mvm_send_cmd(mvm, &cmd);
+	if (ret)
+		return ret;
+
+	iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt);
+	iwl_free_resp(&cmd);
+
+	if (clear)
+		iwl_mvm_accu_radio_stats(mvm);
+
+	return 0;
+}
+
+void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
+{
+	mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time;
+	mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time;
+	mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf;
+	mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
+}

static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
				   struct ieee80211_vif *vif)
{
@@ -717,25 +885,6 @@ bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
	return result;
}

-static void iwl_mvm_idle_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
-{
-	bool *idle = _data;
-
-	if (!vif->bss_conf.idle)
-		*idle = false;
-}
-
-bool iwl_mvm_is_idle(struct iwl_mvm *mvm)
-{
-	bool idle = true;
-
-	ieee80211_iterate_active_interfaces_atomic(
-			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-			iwl_mvm_idle_iter, &idle);
-
-	return idle;
-}

struct iwl_bss_iter_data {
	struct ieee80211_vif *vif;
	bool error;

View file

@@ -898,6 +898,9 @@ static int iwl_pcie_load_given_ucode_8000b(struct iwl_trans *trans,
	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

+	if (trans->dbg_dest_tlv)
+		iwl_pcie_apply_destination(trans);
+
	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
@@ -914,9 +917,6 @@ static int iwl_pcie_load_given_ucode_8000b(struct iwl_trans *trans,
	if (ret)
		return ret;

-	if (trans->dbg_dest_tlv)
-		iwl_pcie_apply_destination(trans);
-
	/* wait for image verification to complete */
	ret = iwl_poll_prph_bit(trans, LMPM_SECURE_BOOT_CPU1_STATUS_ADDR_B0,
				LMPM_SECURE_BOOT_STATUS_SUCCESS,

View file

@@ -742,8 +742,7 @@ void lbs_debugfs_init(void)

void lbs_debugfs_remove(void)
{
-	if (lbs_dir)
-		debugfs_remove(lbs_dir);
+	debugfs_remove(lbs_dir);
}

void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev)

View file

@@ -2397,7 +2397,6 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
	ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
}

-#define MWIFIEX_MAX_WQ_LEN 30
/*
 * create a new virtual interface with the given name
 */
@@ -2411,7 +2410,6 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
	struct mwifiex_private *priv;
	struct net_device *dev;
	void *mdev_priv;
-	char dfs_cac_str[MWIFIEX_MAX_WQ_LEN], dfs_chsw_str[MWIFIEX_MAX_WQ_LEN];

	if (!adapter)
		return ERR_PTR(-EFAULT);
@@ -2576,12 +2574,10 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
		return ERR_PTR(-EFAULT);
	}

-	strcpy(dfs_cac_str, "MWIFIEX_DFS_CAC");
-	strcat(dfs_cac_str, name);
-	priv->dfs_cac_workqueue = alloc_workqueue(dfs_cac_str,
+	priv->dfs_cac_workqueue = alloc_workqueue("MWIFIEX_DFS_CAC%s",
						  WQ_HIGHPRI |
						  WQ_MEM_RECLAIM |
-						  WQ_UNBOUND, 1);
+						  WQ_UNBOUND, 1, name);
	if (!priv->dfs_cac_workqueue) {
		wiphy_err(wiphy, "cannot register virtual network device\n");
		free_netdev(dev);
@@ -2594,11 +2590,9 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,

	INIT_DELAYED_WORK(&priv->dfs_cac_work, mwifiex_dfs_cac_work_queue);

-	strcpy(dfs_chsw_str, "MWIFIEX_DFS_CHSW");
-	strcat(dfs_chsw_str, name);
-	priv->dfs_chan_sw_workqueue = alloc_workqueue(dfs_chsw_str,
+	priv->dfs_chan_sw_workqueue = alloc_workqueue("MWIFIEX_DFS_CHSW%s",
						      WQ_HIGHPRI | WQ_UNBOUND |
-						      WQ_MEM_RECLAIM, 1);
+						      WQ_MEM_RECLAIM, 1, name);
	if (!priv->dfs_chan_sw_workqueue) {
		wiphy_err(wiphy, "cannot register virtual network device\n");
		free_netdev(dev);

View file

@@ -33,6 +33,7 @@
#define MWIFIEX_MAX_BSS_NUM (3)

#define MWIFIEX_DMA_ALIGN_SZ 64
+#define MWIFIEX_RX_HEADROOM 64
#define MAX_TXPD_SZ 32
#define INTF_HDR_ALIGN 4

View file

@@ -296,7 +296,6 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
	memset(&adapter->arp_filter, 0, sizeof(adapter->arp_filter));
	adapter->arp_filter_size = 0;
	adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX;
-	adapter->ext_scan = false;
	adapter->key_api_major_ver = 0;
	adapter->key_api_minor_ver = 0;
	eth_broadcast_addr(adapter->perm_addr);

View file

@@ -190,14 +190,16 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter)

	/* Check if already processing */
	if (adapter->mwifiex_processing) {
+		adapter->more_task_flag = true;
		spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
		goto exit_main_proc;
	} else {
		adapter->mwifiex_processing = true;
-		spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
	}
process_start:
	do {
+		adapter->more_task_flag = false;
+		spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
		if ((adapter->hw_status == MWIFIEX_HW_STATUS_CLOSING) ||
		    (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY))
			break;
@@ -238,6 +240,7 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter)
			adapter->pm_wakeup_fw_try = true;
			mod_timer(&adapter->wakeup_timer, jiffies + (HZ*3));
			adapter->if_ops.wakeup(adapter);
+			spin_lock_irqsave(&adapter->main_proc_lock, flags);
			continue;
		}
@@ -295,8 +298,10 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter)
		if ((adapter->ps_state == PS_STATE_SLEEP) ||
		    (adapter->ps_state == PS_STATE_PRE_SLEEP) ||
		    (adapter->ps_state == PS_STATE_SLEEP_CFM) ||
-		    adapter->tx_lock_flag)
+		    adapter->tx_lock_flag){
+			spin_lock_irqsave(&adapter->main_proc_lock, flags);
			continue;
+		}

		if (!adapter->cmd_sent && !adapter->curr_cmd) {
			if (mwifiex_exec_next_cmd(adapter) == -1) {
@@ -330,15 +335,12 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter)
			}
			break;
		}
+		spin_lock_irqsave(&adapter->main_proc_lock, flags);
	} while (true);

	spin_lock_irqsave(&adapter->main_proc_lock, flags);
-	if (!adapter->delay_main_work &&
-	    (adapter->int_status || IS_CARD_RX_RCVD(adapter))) {
-		spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
+	if (adapter->more_task_flag)
		goto process_start;
-	}

	adapter->mwifiex_processing = false;
	spin_unlock_irqrestore(&adapter->main_proc_lock, flags);

View file

@@ -140,6 +140,9 @@ enum {

#define MWIFIEX_DRV_INFO_SIZE_MAX 0x40000

+/* Address alignment */
+#define MWIFIEX_ALIGN_ADDR(p, a) (((long)(p) + (a) - 1) & ~((a) - 1))
+
struct mwifiex_dbg {
	u32 num_cmd_host_to_card_failure;
	u32 num_cmd_sleep_cfm_host_to_card_failure;
@@ -774,6 +777,7 @@ struct mwifiex_adapter {
	/* spin lock for main process */
	spinlock_t main_proc_lock;
	u32 mwifiex_processing;
+	u8 more_task_flag;
	u16 tx_buf_size;
	u16 curr_tx_buf_size;
	u32 ioport;
@@ -1417,6 +1421,7 @@ u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv,
			    u8 rx_rate, u8 ht_info);

void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter);
+void *mwifiex_alloc_rx_buf(int rx_len, gfp_t flags);

#ifdef CONFIG_DEBUG_FS
void mwifiex_debugfs_init(void);

View file

@@ -203,7 +203,7 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
		card->pcie.reg = data->reg;
		card->pcie.blksz_fw_dl = data->blksz_fw_dl;
		card->pcie.tx_buf_size = data->tx_buf_size;
-		card->pcie.supports_fw_dump = data->supports_fw_dump;
+		card->pcie.can_dump_fw = data->can_dump_fw;
		card->pcie.can_ext_scan = data->can_ext_scan;
	}
@@ -498,7 +498,8 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter)
	for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
		/* Allocate skb here so that firmware can DMA data from it */
-		skb = dev_alloc_skb(MWIFIEX_RX_DATA_BUF_SIZE);
+		skb = mwifiex_alloc_rx_buf(MWIFIEX_RX_DATA_BUF_SIZE,
+					   GFP_KERNEL | GFP_DMA);
		if (!skb) {
			dev_err(adapter->dev,
				"Unable to allocate skb for RX ring.\n");
@@ -1297,7 +1298,8 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
			}
		}
-		skb_tmp = dev_alloc_skb(MWIFIEX_RX_DATA_BUF_SIZE);
+		skb_tmp = mwifiex_alloc_rx_buf(MWIFIEX_RX_DATA_BUF_SIZE,
+					       GFP_KERNEL | GFP_DMA);
		if (!skb_tmp) {
			dev_err(adapter->dev,
				"Unable to allocate skb.\n");
@@ -2271,7 +2273,7 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter)
	int ret;
	static char *env[] = { "DRIVER=mwifiex_pcie", "EVENT=fw_dump", NULL };

-	if (!card->pcie.supports_fw_dump)
+	if (!card->pcie.can_dump_fw)
		return;

	for (idx = 0; idx < ARRAY_SIZE(mem_type_mapping_tbl); idx++) {

View file

@@ -205,7 +205,7 @@ struct mwifiex_pcie_device {
	const struct mwifiex_pcie_card_reg *reg;
	u16 blksz_fw_dl;
	u16 tx_buf_size;
-	bool supports_fw_dump;
+	bool can_dump_fw;
	bool can_ext_scan;
};
@@ -214,7 +214,7 @@ static const struct mwifiex_pcie_device mwifiex_pcie8766 = {
	.reg = &mwifiex_reg_8766,
	.blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
	.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
-	.supports_fw_dump = false,
+	.can_dump_fw = false,
	.can_ext_scan = true,
};
@@ -223,7 +223,7 @@ static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
	.reg = &mwifiex_reg_8897,
	.blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
	.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
-	.supports_fw_dump = true,
+	.can_dump_fw = true,
	.can_ext_scan = true,
};

View file

@@ -105,8 +105,8 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
		card->tx_buf_size = data->tx_buf_size;
		card->mp_tx_agg_buf_size = data->mp_tx_agg_buf_size;
		card->mp_rx_agg_buf_size = data->mp_rx_agg_buf_size;
-		card->supports_fw_dump = data->supports_fw_dump;
-		card->auto_tdls = data->auto_tdls;
+		card->can_dump_fw = data->can_dump_fw;
+		card->can_auto_tdls = data->can_auto_tdls;
		card->can_ext_scan = data->can_ext_scan;
	}
@@ -1357,7 +1357,7 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
			return -1;

		rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
-		skb = dev_alloc_skb(rx_len);
+		skb = mwifiex_alloc_rx_buf(rx_len, GFP_KERNEL | GFP_DMA);
		if (!skb)
			return -1;
@@ -1454,7 +1454,8 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
			}

			rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
-			skb = dev_alloc_skb(rx_len);
+			skb = mwifiex_alloc_rx_buf(rx_len,
+						   GFP_KERNEL | GFP_DMA);
			if (!skb) {
				dev_err(adapter->dev, "%s: failed to alloc skb",
@@ -1887,7 +1888,7 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
		return -1;
	}

-	adapter->auto_tdls = card->auto_tdls;
+	adapter->auto_tdls = card->can_auto_tdls;
	adapter->ext_scan = card->can_ext_scan;
	return ret;
}
@@ -2032,7 +2033,7 @@ static void mwifiex_sdio_fw_dump_work(struct work_struct *work)

	mwifiex_dump_drv_info(adapter);

-	if (!card->supports_fw_dump)
+	if (!card->can_dump_fw)
		return;

	for (idx = 0; idx < ARRAY_SIZE(mem_type_mapping_tbl); idx++) {

View file

@@ -238,9 +238,6 @@ struct sdio_mmc_card {
	const struct mwifiex_sdio_card_reg *reg;
	u8 max_ports;
	u8 mp_agg_pkt_limit;
-	bool supports_sdio_new_mode;
-	bool has_control_mask;
-	bool supports_fw_dump;
	u16 tx_buf_size;
	u32 mp_tx_agg_buf_size;
	u32 mp_rx_agg_buf_size;
@@ -255,7 +252,10 @@ struct sdio_mmc_card {
	u8 curr_wr_port;
	u8 *mp_regs;
-	u8 auto_tdls;
+	bool supports_sdio_new_mode;
+	bool has_control_mask;
+	bool can_dump_fw;
+	bool can_auto_tdls;
	bool can_ext_scan;

	struct mwifiex_sdio_mpa_tx mpa_tx;
@@ -267,13 +267,13 @@ struct mwifiex_sdio_device {
	const struct mwifiex_sdio_card_reg *reg;
	u8 max_ports;
	u8 mp_agg_pkt_limit;
-	bool supports_sdio_new_mode;
-	bool has_control_mask;
-	bool supports_fw_dump;
	u16 tx_buf_size;
	u32 mp_tx_agg_buf_size;
	u32 mp_rx_agg_buf_size;
-	u8 auto_tdls;
+	bool supports_sdio_new_mode;
+	bool has_control_mask;
+	bool can_dump_fw;
+	bool can_auto_tdls;
	bool can_ext_scan;
};
@@ -412,13 +412,13 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
	.reg = &mwifiex_reg_sd87xx,
	.max_ports = 16,
	.mp_agg_pkt_limit = 8,
-	.supports_sdio_new_mode = false,
-	.has_control_mask = true,
	.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
	.mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
	.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
-	.supports_fw_dump = false,
-	.auto_tdls = false,
+	.supports_sdio_new_mode = false,
+	.has_control_mask = true,
+	.can_dump_fw = false,
+	.can_auto_tdls = false,
	.can_ext_scan = false,
};
@@ -427,13 +427,13 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
	.reg = &mwifiex_reg_sd87xx,
	.max_ports = 16,
	.mp_agg_pkt_limit = 8,
-	.supports_sdio_new_mode = false,
-	.has_control_mask = true,
	.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
	.mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
	.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
-	.supports_fw_dump = false,
-	.auto_tdls = false,
+	.supports_sdio_new_mode = false,
+	.has_control_mask = true,
+	.can_dump_fw = false,
+	.can_auto_tdls = false,
	.can_ext_scan = true,
};
@@ -442,13 +442,13 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
	.reg = &mwifiex_reg_sd87xx,
	.max_ports = 16,
	.mp_agg_pkt_limit = 8,
-	.supports_sdio_new_mode = false,
-	.has_control_mask = true,
	.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
	.mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
	.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
-	.supports_fw_dump = false,
-	.auto_tdls = false,
+	.supports_sdio_new_mode = false,
+	.has_control_mask = true,
+	.can_dump_fw = false,
+	.can_auto_tdls = false,
	.can_ext_scan = true,
};
@@ -457,13 +457,13 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
	.reg = &mwifiex_reg_sd8897,
	.max_ports = 32,
	.mp_agg_pkt_limit = 16,
-	.supports_sdio_new_mode = true,
-	.has_control_mask = false,
	.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
	.mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
	.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
-	.supports_fw_dump = true,
-	.auto_tdls = false,
+	.supports_sdio_new_mode = true,
+	.has_control_mask = false,
+	.can_dump_fw = true,
+	.can_auto_tdls = false,
	.can_ext_scan = true,
};
@@ -472,13 +472,13 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
	.reg = &mwifiex_reg_sd8887,
	.max_ports = 32,
	.mp_agg_pkt_limit = 16,
-	.supports_sdio_new_mode = true,
-	.has_control_mask = false,
-	.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+	.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
	.mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
	.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
-	.supports_fw_dump = false,
-	.auto_tdls = true,
+	.supports_sdio_new_mode = true,
+	.has_control_mask = false,
+	.can_dump_fw = false,
+	.can_auto_tdls = true,
	.can_ext_scan = true,
};
@@ -492,8 +492,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8801 = {
	.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
	.mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
	.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
-	.supports_fw_dump = false,
-	.auto_tdls = false,
+	.can_dump_fw = false,
+	.can_auto_tdls = false,
	.can_ext_scan = true,
};

Some files were not shown because too many files have changed in this diff.